2844 mb(); |
2844 mb(); |
2845 } |
2845 } |
2846 |
2846 |
2847 /** Stop all other CPUs |
2847 /** Stop all other CPUs |
2848 |
2848 |
2849 Call with kernel locked |
2849 Call with kernel unlocked, returns with kernel locked. |
2850 */ |
2850 Returns mask of CPUs halted plus current CPU. |
2851 void TStopIPI::StopCPUs() |
2851 */ |
2852 { |
2852 TUint32 TStopIPI::StopCPUs() |
|
2853 { |
|
2854 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"TStopIPI::StopCPUs()"); |
|
2855 TScheduler& s = TheScheduler; |
2853 iFlag = 0; |
2856 iFlag = 0; |
|
2857 NKern::ThreadEnterCS(); |
|
2858 |
|
2859 // Stop any cores powering up or down for now |
|
2860 // A core already on the way down will stop just before the transition to SHUTDOWN_FINAL |
|
2861 // A core already on the way up will carry on powering up |
|
2862 TInt irq = s.iGenIPILock.LockIrqSave(); |
|
2863 ++s.iCCDeferCount; // stops bits in iIpiAcceptCpus being cleared, but doesn't stop them being set |
|
2864 // but iIpiAcceptCpus | s.iCpusComingUp is constant |
|
2865 TUint32 act2 = s.iIpiAcceptCpus; // CPUs still accepting IPIs |
|
2866 TUint32 cu = s.iCpusComingUp; // CPUs powering up |
|
2867 s.iGenIPILock.UnlockIrqRestore(irq); |
|
2868 TUint32 cores = act2 | cu; |
|
2869 if (cu) |
|
2870 { |
|
2871 // wait for CPUs coming up to start accepting IPIs |
|
2872 while (cores & ~s.iIpiAcceptCpus) |
|
2873 { |
|
2874 __snooze(); // snooze until cores have come up |
|
2875 } |
|
2876 } |
|
2877 NKern::Lock(); |
2854 QueueAllOther(&Isr); // send IPIs to all other CPUs |
2878 QueueAllOther(&Isr); // send IPIs to all other CPUs |
2855 WaitEntry(); // wait for other CPUs to reach the ISR |
2879 WaitEntry(); // wait for other CPUs to reach the ISR |
2856 } |
2880 return cores; |
2857 |
2881 } |
|
2882 |
|
2883 |
|
2884 /** Release the stopped CPUs |
|
2885 |
|
2886 Call with kernel locked, returns with kernel unlocked. |
|
2887 */ |
2858 void TStopIPI::ReleaseCPUs() |
2888 void TStopIPI::ReleaseCPUs() |
2859 { |
2889 { |
2860 iFlag = 1; // allow other CPUs to proceed |
2890 __e32_atomic_store_rel32(&iFlag, 1); // allow other CPUs to proceed |
2861 WaitCompletion(); // wait for them to finish with this IPI |
2891 WaitCompletion(); // wait for them to finish with this IPI |
|
2892 NKern::Unlock(); |
|
2893 TheScheduler.CCUnDefer(); |
|
2894 NKern::ThreadLeaveCS(); |
2862 } |
2895 } |
2863 |
2896 |
2864 void TStopIPI::Isr(TGenericIPI* a) |
2897 void TStopIPI::Isr(TGenericIPI* a) |
2865 { |
2898 { |
2866 TStopIPI* s = (TStopIPI*)a; |
2899 TStopIPI* s = (TStopIPI*)a; |
2867 while (!s->iFlag) |
2900 while (!__e32_atomic_load_acq32(&s->iFlag)) |
2868 { |
2901 { |
2869 __chill(); |
2902 __chill(); |
2870 } |
2903 } |
|
2904 __e32_io_completion_barrier(); |
2871 } |
2905 } |
2872 |
2906 |
2873 |
2907 |
/******************************************************************************
 * TCoreCycler - general method to execute something on all active cores