
@param aAffinity The number of the CPU to which this thread should be locked, or
                 KCpuAny if it should be able to run on any CPU.
@return The previous affinity mask.
*/
TUint32 NSchedulable::SetCpuAffinityT(TUint32 aAffinity)
    {
    // check aAffinity is valid
    NThreadBase* t = 0;
    NThreadGroup* g = 0;
    NSchedulable* p = iParent;
    if (!p)
        g = (NThreadGroup*)this, p = g;
    else
        t = (NThreadBase*)this;
    if (iParent && iParent!=this)
        g = (NThreadGroup*)iParent;
    TUint32 old_aff = p->iCpuAffinity;
    TBool make_ready = FALSE;
    TSubScheduler* ss0 = &SubScheduler();
    TSubScheduler* ss = 0;
#ifdef KNKERN
    if (iParent)
        {
        __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetCpu %08x->%08x, F:%d R:%02x PR:%02x",this,iParent->iCpuAffinity,aAffinity,iParent->iFreezeCpu,iReady,iParent->iReady));
        }
    else
        {
        __KTRACE_OPT(KNKERN,DEBUGPRINT("%G nSetCpu %08x->%08x, F:%d R:%02x",this,iCpuAffinity,aAffinity,iFreezeCpu,iReady));
        }
#endif
    if (t && t->i_NThread_Initial)
        goto done;  // can't change affinity of initial thread
    if (aAffinity == NTHREADBASE_CPU_AFFINITY_MASK)
        {
        p->iTransientCpu = 0;
        }
    else if ( (aAffinity & (KCpuAffinityPref|NTHREADBASE_CPU_AFFINITY_MASK)) == KCpuAffinityPref)
        {
        p->iTransientCpu = 0;
        p->iPreferredCpu = TUint8((aAffinity & (EReadyCpuMask|EReadyCpuSticky)) | EReadyOffset);
        }
    else if ( (aAffinity & (KCpuAffinityTransient|KCpuAffinityPref|NTHREADBASE_CPU_AFFINITY_MASK)) == KCpuAffinityTransient)
        {
        p->iTransientCpu = TUint8(aAffinity & EReadyCpuMask) | EReadyOffset;
        }
    else
        p->iCpuAffinity = NSchedulable::PreprocessCpuAffinity(aAffinity);   // set new affinity, might not take effect yet
    if (!p->iReady)
        goto done;  // thread/group not currently on a ready list so can just change affinity

    // Check if the thread needs to migrate or can stay where it is
    if (!p->ShouldMigrate(p->iReady & EReadyCpuMask))
        goto done;  // don't need to move thread, so just change affinity
    ss = TheSubSchedulers + (p->iReady & EReadyCpuMask);
    ss->iReadyListLock.LockOnly();
    if (p->iCurrent)
        {
        p->iCpuChange = TRUE;           // mark CPU change pending
        if (ss == ss0)
            RescheduleNeeded();
        else
            // kick other CPU now so migration happens before acquisition of fast mutex
            send_resched_ipi_and_wait(p->iReady & EReadyCpuMask);
        }
    else
        {
        // Note: Need to know here if any thread in group would return TRUE from CheckFastMutexDefer()
        // This is handled by the scheduler - when a thread belonging to a group is context switched
        // out while holding a fast mutex its iFastMutexDefer is set to 1 and the group's iFreezeCpu
        // is incremented.
        if (p->iFreezeCpu || (iParent==this && t->CheckFastMutexDefer()))
            p->iCpuChange = TRUE;       // CPU frozen or fast mutex held so just mark deferred CPU migration
        else
            {
            ss->SSRemoveEntry(p);
            p->iReady = 0;
            make_ready = TRUE;
            }
        }
    ss->iReadyListLock.UnlockOnly();
    if (make_ready)
        p->ReadyT(0);
done:
    return old_aff;
    }
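
/* Illustrative sketch only, not from the sources: the decode branches above
   suggest the following call forms, with 't' a hypothetical NThreadBase*.
   The flag and constant names are real; the CPU numbers are arbitrary and the
   exact encodings are inferred from the branches in SetCpuAffinityT().

    t->SetCpuAffinityT(0);                              // lock to CPU 0 (plain CPU number, per the @param text)
    t->SetCpuAffinityT(KCpuAffinityPref | 1);           // record CPU 1 as a soft preference
    t->SetCpuAffinityT(KCpuAffinityTransient | 2);      // park on CPU 2 transiently
    t->SetCpuAffinityT(NTHREADBASE_CPU_AFFINITY_MASK);  // clear any transient CPU binding
*/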

/** Force the current thread onto a particular CPU

    @pre    Kernel must not be locked.
    @pre    Call in a thread context.
    @pre    Current thread must not be in a group.
    @pre    Current thread must not hold a fast mutex.
    @pre    Current thread must have an active CPU freeze.
    @pre    Current thread must not be an initial thread.

    @param  aCpu The number of the CPU to which this thread should be moved.
*/
void NKern::JumpTo(TInt aCpu)
    {
    // check aCpu is valid and the preconditions hold
    NThreadBase* t = NKern::CurrentThread();
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NJumpTo %d", t, aCpu));
    if (NKern::HeldFastMutex())
        __crash();
    t->LAcqSLock();
    if (t->iParent!=t)
        __crash();
    if (!t->iFreezeCpu)
        __crash();
    if (t->i_NThread_Initial)
        __crash();
    if (TUint(aCpu) >= (TUint)NKern::NumberOfCpus())
        __crash();
    TUint8 fc = (TUint8)(aCpu | NSchedulable::EReadyOffset);
    if (t->iCurrent != fc)
        {
        t->iForcedCpu = fc;
        t->iCpuChange = TRUE;
        RescheduleNeeded();
        }
    t->RelSLockU();     // reschedules and jumps to new CPU
    }
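
/* Usage sketch (illustrative): JumpTo() demands an active CPU freeze, so a
   caller would bracket it with the freeze API, as TCoreCycler does further
   below. The target CPU number here is arbitrary.

    TInt frz = NKern::FreezeCpu();  // satisfy the active-freeze precondition
    if (!frz)                       // 0 => this is the outermost freeze
        {
        NKern::JumpTo(1);           // current thread resumes on CPU 1
        NKern::EndFreezeCpu(frz);   // allow normal migration again
        }
*/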

TBool NSchedulable::ShouldMigrate(TInt aCpu)
    {
    // Check if the thread's current CPU is compatible with the new affinity
    TUint32 active = TheScheduler.iThreadAcceptCpus;

    // If it can't stay where it is, migrate
    if (!CheckCpuAgainstAffinity(aCpu, iCpuAffinity, active))
        return TRUE;

    TInt cpu = iTransientCpu ? iTransientCpu : iPreferredCpu;

    // No preferred or transient CPU, so can stay where it is
    if (!cpu)
        return FALSE;

    // If thread isn't on preferred CPU but could be, migrate
    cpu &= EReadyCpuMask;
    if (cpu!=aCpu && CheckCpuAgainstAffinity(cpu, iCpuAffinity, active))
        return TRUE;
    return FALSE;
    }
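
/* A minimal model of the decision above (illustrative, not kernel code):
   migrate when the current CPU is no longer permitted, or when a distinct
   preferred/transient CPU is itself currently permitted. */
static inline TBool WouldMigrate(TBool aCurAllowed, TInt aCurCpu, TInt aWantedCpu, TBool aWantedAllowed)
    {
    if (!aCurAllowed)
        return TRUE;        // cannot stay put: must move
    if (aWantedCpu >= 0 && aWantedCpu != aCurCpu && aWantedAllowed)
        return TRUE;        // a better CPU is available: move
    return FALSE;           // stay where it is
    }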


/******************************************************************************
 * Thread wait state
 ******************************************************************************/
// ... (the following fragment is from NKern::JoinGroup)
        pC->iParent = aGroup;
        aGroup->iNThreadList.AddHead(pC);
        if (!aGroup->iReady)
            {
            aGroup->iPriority = pC->iPriority;
            ss.SSAddEntryHead(aGroup);
            aGroup->iReady = TUint8(ss.iCpuNum | NSchedulable::EReadyOffset);
            }
        else if (pC->iPriority > aGroup->iPriority)
            ss.SSChgEntryP(aGroup, pC->iPriority);
        pC->iReady = NSchedulable::EReadyGroup;
        aGroup->iCurrent = aGroup->iReady;
        ss.iReadyListLock.UnlockOnly();
        ++aGroup->iThreadCount;
        TUint64 now = NKern::Timestamp();
        aGroup->iLastStartTime.i64 = now;
        if (++aGroup->iActiveState == 1)
            aGroup->iLastActivationTime.i64 = now;
        goto done;
        }
    }
    // this thread needs to migrate to another CPU
    pC->iNewParent = aGroup;
    // ...
    NKern::Unlock();
    }
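
/* Sketch of the group API round-trip (illustrative; DoGroupedWork is
   hypothetical): a thread joins a group so the group is scheduled as a single
   unit, and later leaves it.

    NThreadGroup* g = ...;              // a group created elsewhere
    NKern::JoinGroup(g);                // current thread now scheduled under g
    DoGroupedWork();
    NThreadGroup* old = NKern::LeaveGroup();    // detach; returns the group just left
*/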


/******************************************************************************
 * Iterable Doubly Linked List
 ******************************************************************************/
TInt SIterDQIterator::Step(SIterDQLink*& aObj, TInt aMaxSteps)
    {
    if (aMaxSteps <= 0)
        aMaxSteps = KMaxCpus + 3;
    SIterDQLink* p = Next();
    SIterDQLink* q = p;
    __NK_ASSERT_DEBUG(p!=0);
    for(; p->IsIterator() && --aMaxSteps>0; p=p->Next())
        {}
    if (p->IsObject())
        {
        // found object
        Deque();
        InsertAfter(p);
        aObj = p;
        return KErrNone;
        }
    if (p->IsAnchor())
        {
        // reached end of list
        if (p != q)
            {
            Deque();
            InsertBefore(p);    // put at the end
            }
        aObj = 0;
        return KErrEof;
        }
    // Maximum allowed number of other iterators skipped
    Deque();
    InsertAfter(p);
    aObj = 0;
    return KErrGeneral;
    }
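
/* Sketch of the Step() protocol (assumes an iterator 'it' already inserted in
   a list; ProcessObject is hypothetical). KErrNone yields an object and leaves
   the iterator just past it, KErrGeneral means the step quota expired before
   an object was found (so retry), and KErrEof parks the iterator at the end.

    SIterDQLink* obj;
    TInt r;
    do  {
        r = it.Step(obj, 0);        // 0 => default cap of KMaxCpus+3 skips
        if (r == KErrNone)
            ProcessObject(obj);
        } while (r != KErrEof);
*/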


/******************************************************************************
 * Priority Lists
 ******************************************************************************/

#ifndef __PRI_LIST_MACHINE_CODED__
/** Returns the priority of the highest priority item present on a priority list.

    @return The highest priority present or -1 if the list is empty.
*/
EXPORT_C TInt TPriListBase::HighestPriority()
    {
    return __e32_find_ms1_64(iPresent64);
    }
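
/* Worked example: with items queued at priorities 0 and 17 the presence bitmap
   iPresent64 is (1u<<17)|1u, so __e32_find_ms1_64() returns 17 (the index of
   the most significant set bit); with no items the bitmap is 0 and the
   function returns -1. */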


/** Finds the highest priority item present on a priority list.
    ... */

/******************************************************************************
 * Generic IPIs
 ******************************************************************************/

extern "C" {
extern void send_generic_ipis(TUint32);

void generic_ipi_isr(TSubScheduler* aS)
    {
    TScheduler& s = TheScheduler;
    TGenericIPI* ipi = aS->iNextIPI;
    if (!ipi)
        return;
    TUint32 m = aS->iCpuMask;
    SDblQueLink* anchor = &s.iGenIPIList.iA;
    while (ipi != anchor)
        {
        __e32_atomic_and_acq32(&ipi->iCpusIn, ~m);
        (*ipi->iFunc)(ipi);
        TInt irq = s.iGenIPILock.LockIrqSave();
        TGenericIPI* n = (TGenericIPI*)ipi->iNext;
        ipi->iCpusOut &= ~m;
        if (ipi->iCpusOut == 0)
            {
            ipi->Deque();
// ...

void TGenericIPI::Queue(TGenericIPIFn aFunc, TUint32 aCpuMask)
    {
    __KTRACE_OPT(KSCHED2,DEBUGPRINT("GenIPI F=%08x M=%08x", aFunc, aCpuMask));
    iFunc = aFunc;
    TScheduler& s = TheScheduler;
    TInt i;
    TUint32 ipis = 0;
    TInt irq = s.iGenIPILock.LockIrqSave();
    if (aCpuMask & 0x80000000u)
        {
        if (aCpuMask==0xffffffffu)
            aCpuMask = s.iIpiAcceptCpus;
        else if (aCpuMask==0xfffffffeu)
            aCpuMask = s.iIpiAcceptCpus &~ SubScheduler().iCpuMask;
        else
            aCpuMask = 0;
        }
    iCpusIn = aCpuMask;
    iCpusOut = aCpuMask;
    if (!aCpuMask)
        {
        s.iGenIPILock.UnlockIrqRestore(irq);
        iNext = 0;
        return;
        }
    s.iGenIPIList.Add(this);
    for (i=0; i<s.iNumCpus; ++i)
        {
        if (!(aCpuMask & (1<<i)))
            continue;
        TSubScheduler& ss = *s.iSub[i];
// ...
    mb();
    }
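
/* Illustrative call patterns implied by the mask decoding above ('ipi' is a
   TGenericIPI the caller keeps alive until completion; MyIsr is a hypothetical
   function matching the iFunc callback type):

    ipi.Queue(&MyIsr, 0xffffffffu); // every CPU currently accepting IPIs
    ipi.Queue(&MyIsr, 0xfffffffeu); // every accepting CPU except this one
    ipi.Queue(&MyIsr, 0x00000006u); // explicit mask: CPUs 1 and 2 only
*/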

/** Stop all other CPUs

    Call with kernel unlocked, returns with kernel locked.
    Returns the mask of CPUs halted plus the current CPU.
*/
TUint32 TStopIPI::StopCPUs()
    {
    CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"TStopIPI::StopCPUs()");
    TScheduler& s = TheScheduler;
    iFlag = 0;
    NKern::ThreadEnterCS();

    // Stop any cores powering up or down for now
    // A core already on the way down will stop just before the transition to SHUTDOWN_FINAL
    // A core already on the way up will carry on powering up
    TInt irq = s.iGenIPILock.LockIrqSave();
    ++s.iCCDeferCount;  // stops bits in iIpiAcceptCpus being cleared, but doesn't stop them being set
                        // but iIpiAcceptCpus | s.iCpusComingUp is constant
    TUint32 act2 = s.iIpiAcceptCpus;    // CPUs still accepting IPIs
    TUint32 cu = s.iCpusComingUp;       // CPUs powering up
    s.iGenIPILock.UnlockIrqRestore(irq);
    TUint32 cores = act2 | cu;
    if (cu)
        {
        // wait for CPUs coming up to start accepting IPIs
        while (cores & ~s.iIpiAcceptCpus)
            {
            __snooze(); // snooze until cores have come up
            }
        }
    NKern::Lock();
    QueueAllOther(&Isr);    // send IPIs to all other CPUs
    WaitEntry();            // wait for other CPUs to reach the ISR
    return cores;
    }


/** Release the stopped CPUs

    Call with kernel locked, returns with kernel unlocked.
*/
void TStopIPI::ReleaseCPUs()
    {
    __e32_atomic_store_rel32(&iFlag, 1);    // allow other CPUs to proceed
    WaitCompletion();       // wait for them to finish with this IPI
    NKern::Unlock();
    TheScheduler.CCUnDefer();
    NKern::ThreadLeaveCS();
    }

void TStopIPI::Isr(TGenericIPI* a)
    {
    TStopIPI* s = (TStopIPI*)a;
    while (!__e32_atomic_load_acq32(&s->iFlag))
        {
        __chill();
        }
    __e32_io_completion_barrier();
    }
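
/* Typical usage sketch (per the documentation above): halt the other cores
   around a critical inspection; DoInspection is hypothetical.

    TStopIPI ipi;
    TUint32 cores = ipi.StopCPUs(); // returns with the kernel locked
    DoInspection(cores);            // other CPUs spin in Isr() meanwhile
    ipi.ReleaseCPUs();              // unlocks the kernel, resumes the others
*/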


/******************************************************************************
 * TCoreCycler - general method to execute something on all active cores
 ******************************************************************************/
TCoreCycler::TCoreCycler()
    {
    iCores = 0;
    iG = 0;
    }

void TCoreCycler::Init()
    {
    CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"TCoreCycler::Init()");
    TScheduler& s = TheScheduler;
    NKern::ThreadEnterCS();
    iG = NKern::LeaveGroup();
    NThread* t = NKern::CurrentThread();
    if (t->iCoreCycling)
        {
        __crash();
        }
    t->iCoreCycling = TRUE;

    // Stop any cores powering up or down for now
    // A core already on the way down will stop just before the transition to SHUTDOWN_FINAL
    // A core already on the way up will carry on powering up
    TInt irq = s.iGenIPILock.LockIrqSave();
    ++s.iCCDeferCount;  // stops bits in iIpiAcceptCpus being cleared, but doesn't stop them being set
                        // but iIpiAcceptCpus | s.iCpusComingUp is constant
    TUint32 act2 = s.iIpiAcceptCpus;    // CPUs still accepting IPIs
    TUint32 cu = s.iCpusComingUp;       // CPUs powering up
    TUint32 gd = s.iCpusGoingDown;      // CPUs no longer accepting IPIs on the way down
    s.iGenIPILock.UnlockIrqRestore(irq);
    if (gd)
        {
        // wait for CPUs going down to reach INACTIVE state
        TUint32 remain = gd;
        FOREVER
            {
            TInt i;
            for (i=0; i<KMaxCpus; ++i)
                {
                if (remain & (1u<<i))
                    {
                    // platform specific function returns TRUE when core has detached from SMP cluster
                    if (s.iSub[i]->Detached())
                        remain &= ~(1u<<i); // core is now down
                    }
                }
            if (!remain)
                break;  // all done
            else
                {
                __snooze(); // snooze until cores have gone down
                }
            }
        }
    iCores = act2 | cu;
    if (cu)
        {
        // wait for CPUs coming up to start accepting IPIs
        while (iCores & ~s.iIpiAcceptCpus)
            {
            __snooze(); // snooze until cores have come up
            }
        }
    iFrz = NKern::FreezeCpu();
    if (iFrz)
        __crash();  // already frozen so won't be able to migrate :-(
    iInitialCpu = NKern::CurrentCpu();
    iCurrentCpu = iInitialCpu;
    iRemain = iCores;
    }

TInt TCoreCycler::Next()
    {
    NThread* t = NKern::CurrentThread();
    if (iCores == 0)
        {
        Init();
        return KErrNone;
        }
    if (NKern::CurrentCpu() != iCurrentCpu)
        __crash();
    iRemain &= ~(1u<<iCurrentCpu);
    TInt nextCpu = iRemain ? __e32_find_ms1_32(iRemain) : iInitialCpu;
    if (nextCpu != iCurrentCpu)
        {
        NKern::JumpTo(nextCpu);
        iCurrentCpu = nextCpu;
        if (NKern::CurrentCpu() != iCurrentCpu)
            __crash();
        }
    if (iRemain)
        {
        return KErrNone;
        }
    NKern::EndFreezeCpu(iFrz);
    iCores = 0;
    TScheduler& s = TheScheduler;
    s.CCUnDefer();
    t->iCoreCycling = FALSE;
    if (iG)
        NKern::JoinGroup(iG);
    NKern::ThreadLeaveCS();
    return KErrEof;
    }
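
/* Intended usage sketch (follows the Next() protocol above): visit every
   active core in turn; DoPerCoreWork is hypothetical.

    TCoreCycler cycler;
    while (cycler.Next() == KErrNone)
        DoPerCoreWork(NKern::CurrentCpu()); // runs once on each active core
    // Next() returns KErrEof after the cycle completes and cleans up
*/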