#include "nk_priv.h"
#include <nk_irq.h>

TSpinLock NEventHandler::TiedLock(TSpinLock::EOrderEventHandlerTied);

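// Mapping from thread priority (0-63) to the 'priority class' used for the
// per-class ready thread counts (iPriClassThreadCount) that guide CPU selection
// and load balancing.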
|
30 const TUint8 KClassFromPriority[KNumPriorities] = |
|
31 { |
|
32 0, 0, 0, 0, 0, 0, 0, 0, // priorities 0-7 |
|
33 0, 0, 0, 0, 1, 1, 1, 1, // priorities 8-15 |
|
34 2, 2, 2, 2, 2, 2, 2, 2, // priorities 16-23 |
|
35 2, 2, 2, 3, 3, 3, 3, 3, // priorities 24-31 |
|
36 3, 3, 3, 3, 3, 3, 3, 3, // priorities 32-39 |
|
37 3, 3, 3, 3, 3, 3, 3, 3, // priorities 40-47 |
|
38 3, 3, 3, 3, 3, 3, 3, 3, // priorities 48-55 |
|
39 3, 3, 3, 3, 3, 3, 3, 3 // priorities 56-63 |
|
40 }; |
|
41 |
|
42 |
/******************************************************************************
 * TScheduler
 ******************************************************************************/

// TScheduler resides in .bss so other fields are zero-initialised
TScheduler::TScheduler()
	:	iThreadAcceptCpus(1),	// only boot CPU for now
		iIpiAcceptCpus(1),		// only boot CPU for now
		iGenIPILock(TSpinLock::EOrderGenericIPIList),
		iIdleBalanceLock(TSpinLock::EOrderEnumerate),
		iIdleSpinLock(TSpinLock::EOrderIdleDFCList),
		iCpusNotIdle(1),		// only boot CPU for now
		iEnumerateLock(TSpinLock::EOrderEnumerate),
		iBalanceListLock(TSpinLock::EOrderReadyList),
		iBalanceTimer(&BalanceTimerExpired, this, 1),
		iCCSyncIDFC(&CCSyncDone, 0),
		iCCReactivateDfc(&CCReactivateDfcFn, this, 3),
		iCCRequestLevel(1),		// only boot CPU for now
		iCCRequestDfc(&CCRequestDfcFn, this, 2),
		iCCPowerDownDfc(&CCIndirectPowerDown, this, 0),
		iCCIpiReactIDFC(&CCIpiReactivateFn, this)
	{
	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler* s = TheSubSchedulers + i;
		iSub[i] = s;
		s->iScheduler = this;
		s->iCpuNum = TUint32(i);
		s->iCpuMask = 1u<<i;
		s->iLbCounter = TUint8(NSchedulable::ELbState_PerCpu + i);
		}
	iLbCounter = (TUint8)NSchedulable::ELbState_Global;
	iNeedBal = 1;	// stop anyone trying to kick rebalancer before it has been created
	}

/** Return a pointer to the scheduler
	Intended for use by the crash debugger, not for general device driver use.
 */

/******************************************************************************
 * TSubScheduler
 ******************************************************************************/

// TSubScheduler resides in .bss so other fields are zero-initialised
TSubScheduler::TSubScheduler()
	:	iExIDfcLock(TSpinLock::EOrderExIDfcQ),
		iReadyListLock(TSpinLock::EOrderReadyList),
		iKernLockCount(1),
		iEventHandlerLock(TSpinLock::EOrderEventHandlerList)
	{
	}

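// The SS*Entry functions below keep iRdyThreadCount and the per-priority-class
// counts (iPriClassThreadCount) in step with the ready list. Each CPU's initial
// (idle) thread is excluded from the counts.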
|
void TSubScheduler::SSAddEntry(NSchedulable* aEntry)
	{
	if (aEntry->iParent!=aEntry || !((NThreadBase*)aEntry)->i_NThread_Initial)
		{
		TInt c = KClassFromPriority[aEntry->iPriority];
		++iPriClassThreadCount[c];
		++iRdyThreadCount;
		}
	iSSList.Add(aEntry);
	}

void TSubScheduler::SSAddEntryHead(NSchedulable* aEntry)
	{
	if (aEntry->iParent!=aEntry || !((NThreadBase*)aEntry)->i_NThread_Initial)
		{
		TInt c = KClassFromPriority[aEntry->iPriority];
		++iPriClassThreadCount[c];
		++iRdyThreadCount;
		}
	iSSList.AddHead(aEntry);
	}

void TSubScheduler::SSRemoveEntry(NSchedulable* aEntry)
	{
	if (aEntry->iParent!=aEntry || !((NThreadBase*)aEntry)->i_NThread_Initial)
		{
		TInt c = KClassFromPriority[aEntry->iPriority];
		--iPriClassThreadCount[c];
		--iRdyThreadCount;
		}
	iSSList.Remove(aEntry);
	}

void TSubScheduler::SSChgEntryP(NSchedulable* aEntry, TInt aNewPriority)
	{
	if (aEntry->iParent!=aEntry || !((NThreadBase*)aEntry)->i_NThread_Initial)
		{
		TInt c0 = KClassFromPriority[aEntry->iPriority];
		TInt c1 = KClassFromPriority[aNewPriority];
		if (c0 != c1)
			{
			--iPriClassThreadCount[c0];
			++iPriClassThreadCount[c1];
			}
		}
	iSSList.ChangePriority(aEntry, aNewPriority);
	}


/******************************************************************************
 * NSchedulable
 ******************************************************************************/
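// If aAffinity specifies exactly one CPU via the affinity mask form, convert it
// to a plain CPU number; otherwise return it unchanged.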
|
TUint32 NSchedulable::PreprocessCpuAffinity(TUint32 aAffinity)
	{
	if (!(aAffinity & NTHREADBASE_CPU_AFFINITY_MASK))
		return aAffinity;
	TUint32 x = aAffinity & ~NTHREADBASE_CPU_AFFINITY_MASK;
	if (x & (x-1))
		return aAffinity;
	return __e32_find_ls1_32(x);
	}

void NSchedulable::AcqSLock()
	{
	iSSpinLock.LockOnly();
	if (iParent!=this && iParent)
		iParent->AcqSLock();
		{
		__chill();
		}
	}

|

/** Return the total CPU time so far used by the specified thread.

	@return The total CPU time in units of 1/NKern::CpuTimeMeasFreq().
*/
EXPORT_C TUint64 NKern::ThreadCpuTime(NThread* aThread)
	{
	NSchedulable::SCpuStats stats;
	NKern::Lock();
	aThread->GetCpuStats(NSchedulable::E_RunTime, stats);
	NKern::Unlock();
	return stats.iRunTime;
	}

void NSchedulable::GetCpuStats(TUint aMask, NSchedulable::SCpuStats& aOut)
	{
	AcqSLock();
	GetCpuStatsT(aMask, aOut);
	RelSLock();
	}

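// Gather the requested statistics with this entity's spin lock already held,
// taking the relevant sub-scheduler's ready list lock while the times are read.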
|
void NSchedulable::GetCpuStatsT(TUint aMask, NSchedulable::SCpuStats& aOut)
	{
	TSubScheduler* ss = 0;
	NThread* t = 0;
	TBool initial = FALSE;
	if (!IsGroup())
		t = (NThread*)this;
	if (t && t->i_NThread_Initial)
		ss = &TheSubSchedulers[iLastCpu], initial = TRUE;
	else if (iReady)
		{
		if (IsGroup())
			ss = &TheSubSchedulers[iReady & NSchedulable::EReadyCpuMask];
		else if (iParent->iReady)
			ss = &TheSubSchedulers[iParent->iReady & NSchedulable::EReadyCpuMask];
		}
	if (ss)
		ss->iReadyListLock.LockOnly();
	TUint64 now = NKern::Timestamp();
	if (aMask & (E_RunTime|E_RunTimeDelta))
		{
		aOut.iRunTime = iTotalCpuTime.i64;
		if (iCurrent || (initial && !ss->iCurrentThread))
			aOut.iRunTime += (now - ss->iLastTimestamp.i64);
		if (aMask & E_RunTimeDelta)
			{
			aOut.iRunTimeDelta = aOut.iRunTime - iSavedCpuTime.i64;
			iSavedCpuTime.i64 = aOut.iRunTime;
			}
		}
	if (aMask & (E_ActiveTime|E_ActiveTimeDelta))
		{
		aOut.iActiveTime = iTotalActiveTime.i64;
		if (iActiveState)
			aOut.iActiveTime += (now - iLastActivationTime.i64);
		if (aMask & E_ActiveTimeDelta)
			{
			aOut.iActiveTimeDelta = aOut.iActiveTime - iSavedActiveTime.i64;
			iSavedActiveTime.i64 = aOut.iActiveTime;
			}
		}
	if (aMask & E_LastRunTime)
		{
		if (iCurrent)
			aOut.iLastRunTime = 0;
		else
			aOut.iLastRunTime = now - iLastRunTime.i64;
		}
	if (aMask & E_LastActiveTime)
		{
		if (iActiveState)
			aOut.iLastActiveTime = 0;
		else
			aOut.iLastActiveTime = now - iLastRunTime.i64;
		}
	if (ss)
		ss->iReadyListLock.UnlockOnly();
	}

/******************************************************************************
 * NThreadGroup
 ******************************************************************************/

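// Make this thread or thread group ready to run. aMode carries flags such as
// ENewTimeslice, EPreferSameCpu and EUnPause which affect timeslicing and the
// choice of CPU.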
void NSchedulable::ReadyT(TUint aMode)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NSchedulable::ReadyT");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nReadyT(%x)",this,aMode));
	NThreadBase* t = (NThreadBase*)this;
	if (iParent && !iActiveState)
		{
		iActiveState=1;
		iLastActivationTime.i64 = NKern::Timestamp();
		if (iParent!=this && ++iParent->iActiveState==1)
			iParent->iLastActivationTime.i64 = iLastActivationTime.i64;
		}
#ifdef _DEBUG
	if (!iParent)
		t = (NThreadBase*)0xface0fff;
#endif
	__NK_ASSERT_DEBUG(!iReady && (!iParent || (!t->iWaitState.iWtC.iWtStFlags && !t->iSuspended)));
	TSubScheduler& ss0 = SubScheduler();
	TScheduler& s = TheScheduler;
	TBool reactivate = FALSE;
	TBool no_ipi = FALSE;
	NSchedulable* g = this;
	if (iParent != this && iParent)
		{
		NThreadGroup* tg = (NThreadGroup*)iParent;
		iReady = EReadyGroup;
			TInt gp = tg->iPriority;
			TSubScheduler& ss = TheSubSchedulers[tg->iReady & EReadyCpuMask];
			ss.iReadyListLock.LockOnly();
			TInt hp = ss.HighestPriority();
			if (iPriority>gp)
				{
				ss.SSChgEntryP(tg, iPriority);
				}
			if (iPriority>hp || (iPriority==hp && ss.iCurrentThread && ss.iCurrentThread->iTime==0))
				{
				if (&ss == &ss0)
					RescheduleNeeded();					// reschedule on this processor
				else
					ss0.iReschedIPIs |= ss.iCpuMask;	// will kick the other CPU when this CPU reenables preemption
				}
			if ((aMode & ENewTimeslice) && t->iTime==0 && (iNext!=this || ss.EntryAtPriority(iPriority)))
				t->iTime = t->iTimeslice;
			ss.iReadyListLock.UnlockOnly();

			ss0.iMadeReadyCounter++;
			return;
			}
		tg->iNThreadList.Add(this);
		tg->iPriority = iPriority;	// first in group
		g = tg;						// fall through to add group to subscheduler
		}
	TInt priClass = -1;
	TInt cpu = -1;
	TUint32 active = TheScheduler.iThreadAcceptCpus;
	if (g!=t || !t->i_NThread_Initial)
		priClass = KClassFromPriority[g->iPriority];
	if (g->iForcedCpu)
		{
		cpu = iForcedCpu & EReadyCpuMask;	// handles core cycling case (No.1 below)
		if (active & (1u<<cpu))
			goto cpu_ok;
		else
			goto single_cpu_reactivate;
		}
	if (aMode & EUnPause)
		{
		cpu = (g->iEventState & EThreadCpuMask)>>EThreadCpuShift;
		if (CheckCpuAgainstAffinity(cpu, g->iCpuAffinity))
			goto cpu_ok;
		cpu = -1;
		}
	if (g->iFreezeCpu)
		{
		cpu = g->iLastCpu;
		goto cpu_ok;
		}
	if (!(g->iCpuAffinity & NTHREADBASE_CPU_AFFINITY_MASK))
		{
		cpu = g->iCpuAffinity;
		if (!(active & (1u<<cpu)))
			goto single_cpu_reactivate;
		goto cpu_ok;
		}
	if ((aMode & EPreferSameCpu) && CheckCpuAgainstAffinity(ss0.iCpuNum, g->iCpuAffinity, active))
		cpu = ss0.iCpuNum;
	else if (iTransientCpu && CheckCpuAgainstAffinity(iTransientCpu & EReadyCpuMask, g->iCpuAffinity))
		cpu = iTransientCpu & EReadyCpuMask;
	else if (iPreferredCpu && CheckCpuAgainstAffinity(iPreferredCpu & EReadyCpuMask, g->iCpuAffinity, active))
		cpu = iPreferredCpu & EReadyCpuMask;
	if (cpu < 0)
		{
		// pick a cpu
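		// Walk the CPUs in the affinity mask starting from the last CPU this entity
		// ran on. Track the first idle CPU found, the eligible CPU with the fewest
		// ready threads in this priority class among those this entity would preempt,
		// and the least loaded eligible CPU overall as a fallback.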
		TUint32 m = g->iCpuAffinity & active;
		TInt lastCpu = g->iLastCpu;
		TInt i = lastCpu;
		TInt lcp = KMaxTInt;
		TInt lco = KMaxTInt;
		TInt cpunp = -1;
		TInt idle_cpu = -1;
		do	{
			if (m & (1u<<i))
				{
				TSubScheduler& ss = *s.iSub[i];
				TInt nInC = ss.iPriClassThreadCount[priClass];
				if (nInC < lco)
					lco=nInC, cpunp=i;
				TInt hp = ss.HighestPriority();
				if (idle_cpu<0 && hp<=0)
					idle_cpu = i;
				if (hp < iPriority)
					{
					if (i == lastCpu)
						{
						cpu = i;
						if (hp <= 0)
							break;
						lcp = -1;
						}
					if (nInC < lcp)
						lcp=nInC, cpu=i;
					}
				}
			if (++i == s.iNumCpus)
				i = 0;
			} while (i != lastCpu);
		if (idle_cpu>=0 && cpu!=idle_cpu)
			cpu = idle_cpu;
		else if (cpu<0)
			cpu = cpunp;
		}
	if (cpu<0)
		{
single_cpu_reactivate:
		/*	CORE_CONTROL
			Might have no CPU at this point due to all CPUs specified by
			iCpuAffinity being off or in the process of shutting down.
			There are three possibilities:
			1.	This thread is 'core cycling'. In that case it will be
				allowed to move to a 'shutting down' CPU. The CPU will
				not be permitted to shut down entirely until all core cycling
				has completed. This is already handled above.
			2.	There are one or more CPUs which this thread could run on which
				are shutting down. In that case, pick one, abort the shutdown
				process and put this thread on it.
			3.	All CPUs which this thread can run on are off. In that case,
				assign the thread to one of them and initiate power up of that core.
		*/
		TUint32 affm = AffinityToMask(g->iCpuAffinity);
		TInt irq = s.iGenIPILock.LockIrqSave();
		if (cpu < 0)
			{
			if (affm & s.iCCReactivateCpus)
				cpu = __e32_find_ls1_32(affm & s.iCCReactivateCpus);
			else if (affm & s.iIpiAcceptCpus)
				cpu = __e32_find_ls1_32(affm & s.iIpiAcceptCpus);
			else
				cpu = __e32_find_ls1_32(affm), no_ipi = TRUE;
			}
		TUint32 cm = 1u<<cpu;
		if (!((s.iCCReactivateCpus|s.iThreadAcceptCpus) & cm))
			{
			s.iCCReactivateCpus |= (1u<<cpu);
			reactivate = TRUE;
			}
		s.iGenIPILock.UnlockIrqRestore(irq);
		}
cpu_ok:
	__NK_ASSERT_ALWAYS(cpu>=0);
	if (g->iFreezeCpu && !CheckCpuAgainstAffinity(cpu, g->iCpuAffinity))
		g->iCpuChange = TRUE;
	if (g->TiedEventReadyInterlock(cpu))
		{
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("ReadyT->CPU %dD",cpu));
		++g->iPauseCount;
		}
	else
		{
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("ReadyT->CPU %d",cpu));
		TSubScheduler& ss = TheSubSchedulers[cpu];
		ss.iReadyListLock.LockOnly();
		TInt hp = ss.HighestPriority();
		if (g->iPriority>hp || (g->iPriority==hp && ss.iCurrentThread && ss.iCurrentThread->iTime==0))
			{
			if (&ss == &ss0)
				RescheduleNeeded();					// reschedule on this processor
			else if (!no_ipi)
				ss0.iReschedIPIs |= ss.iCpuMask;	// will kick the other CPU when this CPU reenables preemption
			}
		ss.SSAddEntry(g);
		g->iReady = TUint8(cpu | EReadyOffset);
		if ((aMode & ENewTimeslice) && iParent && t->iTime==0 && g->iNext!=g)
			t->iTime = t->iTimeslice;
		if (!g->iLbLink.iNext && !(g->iParent && t->i_NThread_Initial))
			{
			ss.iLbQ.Add(&g->iLbLink);
			g->iLbState = ss.iLbCounter;
			if (!s.iNeedBal && (!g->iParent || !(t->iRebalanceAttr & 1)))
				{
				s.iNeedBal = 1;
				reactivate = TRUE;
				}
			}
		if (g->iForcedCpu == g->iReady)
			{
			g->iLastCpu = (TUint8)cpu;
			g->iForcedCpu = 0;	// iForcedCpu has done its job - iFreezeCpu will keep the thread on the right CPU
			}
		ss.iReadyListLock.UnlockOnly();
		ss0.iMadeReadyCounter++;
		}
	if (reactivate)
		s.iCCReactivateDfc.Add();
	}

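// Choose the next thread to run on this sub-scheduler's CPU, handling any pending
// migration or change of parent group for the thread that is being switched out.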
NThread* TSubScheduler::SelectNextThread()
	{
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T WS: %02x %02x (%08x) P:%02x S:%1x", ot,
			ot->iWaitState.iWtC.iWtStFlags, ot->iWaitState.iWtC.iWtObjType, ot->iWaitState.iWtC.iWtObj, ot->iPauseCount, ot->iSuspended));
		TInt wtst = ot->iWaitState.DoWait();
		if (wtst>=0 && wtst!=NThread::EWaitFastMutex)
			ot->iTime = ot->iTimeslice;
		if (wtst==KErrDied || ot->iSuspended || (!(ot->iWaitState.iWtC.iWtStFlags & NThreadWaitState::EWtStObstructed) && wtst>=0))
			{
			ot->iActiveState = 0;
			ot->iParent->iTransientCpu = 0;
			if (ot->iParent != ot)
				--ot->iParent->iActiveState;
			}
		ot->UnReadyT();
		if (ot->iNewParent)
			{
			ot->iParent = ot->iNewParent, ++((NThreadGroup*)ot->iParent)->iThreadCount;
			wmb();	// must make sure iParent is updated before iNewParent is cleared
			ot->iNewParent = 0;
			if (ot->iActiveState && ++ot->iParent->iActiveState==1)
				ot->iParent->iLastActivationTime.i64 = NKern::Timestamp();
			}
		ot->iCpuChange = FALSE;
		}
	else if (ot->iNewParent)
		{
		ot->iParent = ot->iNewParent;
		ot->iCpuChange = FALSE;
		++((NThreadGroup*)ot->iParent)->iThreadCount;
		wmb();	// must make sure iParent is updated before iNewParent is cleared
		ot->iNewParent = 0;
		TUint64 now = NKern::Timestamp();
		if (!ot->iParent->iCurrent)
			ot->iParent->iLastStartTime.i64 = now;
		if (++ot->iParent->iActiveState==1)
			ot->iParent->iLastActivationTime.i64 = now;
		}
	else if (ot->iParent->iCpuChange)
		{
		if (ot->iForcedCpu)
			migrate = TRUE;
		else if (!ot->iParent->iFreezeCpu)
			{
			if (ot->iParent->ShouldMigrate(iCpuNum))
				{
				if (ot->iParent==ot)
					{
					if (!fmd_done)
						fmd_res = ot->CheckFastMutexDefer(), fmd_done = TRUE;
					if (!fmd_res)
						migrate = TRUE;
					}
				else
					gmigrate = TRUE;
				}
			else
				{
				ot->iCpuChange = FALSE;
				ot->iParent->iCpuChange = FALSE;
				}
			}
		if (migrate)
			{
			__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T A:%08x",ot,ot->iParent->iCpuAffinity));
			ot->UnReadyT();
			ot->iCpuChange = FALSE;
			}
		else if (gmigrate)
			{
			__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T GA:%08x",ot,ot->iParent->iCpuAffinity));
			SSRemoveEntry(ot->iParent);
			ot->iParent->iReady = 0;
			ot->iCpuChange = FALSE;
			ot->iParent->iCpuChange = FALSE;
			}
		}
no_ot:
	NSchedulable* g = (NSchedulable*)iSSList.First();
	TBool rrcg = FALSE;
	if (g && g->IsGroup())
		{
		t = (NThread*)((NThreadGroup*)g)->iNThreadList.First();
		if (g->iNext!=g)
		ot->ReadyT(NThreadBase::ENewTimeslice);	// new timeslice if it's queued behind another thread at same priority
	if (gmigrate)
		ot->iParent->ReadyT(0);	// new timeslice if it's queued behind another thread at same priority
	if (ot)
		{
		TBool dead = ot->iWaitState.ThreadIsDead();
		if (dead && ot->iLbLink.iNext)
			ot->LbUnlink();
		ot->RelSLock();

		// DFC to signal thread is now dead
		if (dead && ot->iWaitState.iWtC.iKillDfc && __e32_atomic_tau_ord8(&ot->iACount, 1, 0xff, 0)==1)
			{
			ot->RemoveFromEnumerateList();
			ot->iWaitState.iWtC.iKillDfc->DoEnque();
			}
		}
	if (iCCSyncPending)
		{
		iCCSyncPending = 0;
		iReschedIPIs |= 0x80000000u;		// update iCCSyncCpus when kernel is finally unlocked
		}
	__KTRACE_OPT(KSCHED,DEBUGPRINT("Rschd->%T",t));
	__NK_ASSERT_ALWAYS(!t || t->iParent);	// must be a thread not a group
	return t;	// could return NULL
	}

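// Remove this entity from whichever load-balance queue (per-CPU or global) it is
// currently linked to; if the balancer is running, leave it an extra reference to
// release instead.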
|
void NSchedulable::LbUnlink()
	{
	if (iLbState & ELbState_PerCpu)
		{
		TSubScheduler* ss = &TheSubSchedulers[iLbState & ELbState_CpuMask];
		ss->iReadyListLock.LockOnly();
		if (iLbState == ss->iLbCounter)
			{
			iLbLink.Deque();
			iLbLink.iNext = 0;
			iLbState = ELbState_Inactive;
			}
		ss->iReadyListLock.UnlockOnly();
		}
	else if ((iLbState & ELbState_CpuMask) == ELbState_Global)
		{
		TScheduler& s = TheScheduler;
		s.iBalanceListLock.LockOnly();
		if (iLbState == s.iLbCounter)
			{
			iLbLink.Deque();
			iLbLink.iNext = 0;
			iLbState = ELbState_Inactive;
			}
		s.iBalanceListLock.UnlockOnly();
		}
	if (iLbState != ELbState_Inactive)
		{
		// load balancer is running so we can't dequeue the thread
		iLbState |= ELbState_ExtraRef;				// indicates extra ref has been taken
		__e32_atomic_tau_ord8(&iACount, 1, 1, 0);	// extra ref will be removed by load balancer
		}
	}

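// Reference counting on schedulable entities: TakeRef() adds a reference only if
// the count is already nonzero; DropRef() releases one and, on the last release,
// queues the appropriate destruction DFC and removes the entity from the
// enumeration list.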
|
TBool NSchedulable::TakeRef()
	{
	return __e32_atomic_tau_ord8(&iACount, 1, 1, 0);
	}

TBool NSchedulable::DropRef()
	{
	if (__e32_atomic_tau_ord8(&iACount, 1, 0xff, 0)!=1)
		return EFalse;
	TDfc* d = 0;
	AcqSLock();
	if (iParent)
		{
		// it's a thread
		NThreadBase* t = (NThreadBase*)this;
		if (t->iWaitState.ThreadIsDead() && t->iWaitState.iWtC.iKillDfc)
			d = t->iWaitState.iWtC.iKillDfc;
		RelSLock();
		t->RemoveFromEnumerateList();
		}
	else
		{
		NThreadGroup* g = (NThreadGroup*)this;
		d = g->iDestructionDfc;
		RelSLock();
		g->RemoveFromEnumerateList();
		}
	if (d)
		d->DoEnque();
	return ETrue;
	}

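// Detach this entity from the scheduler's enumeration list, if it is still linked.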
|
void NSchedulable::RemoveFromEnumerateList()
	{
	TScheduler& s = TheScheduler;
	s.iEnumerateLock.LockOnly();
	if (iEnumerateLink.Next())
		{
		iEnumerateLink.Deque();
		iEnumerateLink.SetNext(0);
		}
	s.iEnumerateLock.UnlockOnly();
	}

void NThreadBase::UnReadyT()
	{
	if (iParent!=this)
		{
			newp = hp;
			g = tg;
			}
		if (newp <= ss->HighestPriority())
			RescheduleNeeded();
		ss->SSChgEntryP(g, newp);
out:
	ss->iReadyListLock.UnlockOnly();
	}

|
/******************************************************************************
 * Pull threads on idle
 ******************************************************************************/

const TInt KMaxTries = 4;

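// Scratch state for an idle CPU looking for a thread to pull from another CPU:
// the best candidate found so far, its priority, and any superseded candidates
// whose extra reference still needs to be dropped.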
|
struct SIdlePullThread
	{
	SIdlePullThread();
	void Finish(TBool aDone);

	NSchedulable* iS;
	TInt iPri;
	NSchedulable* iOld[KMaxCpus];
	};

SIdlePullThread::SIdlePullThread()
	{
	iS = 0;
	iPri = 0;
	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		iOld[i] = 0;
	}

void SIdlePullThread::Finish(TBool aComplete)
	{
	if (aComplete && iS)
		{
		iS->AcqSLock();
		iS->SetCpuAffinityT(NKern::CurrentCpu() | KCpuAffinityTransient);
		iS->RelSLock();
		}
	if (iS)
		iS->DropRef();
	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		if (iOld[i])
			iOld[i]->DropRef();
	}

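// Scan this sub-scheduler's ready list for a thread that the (idle) destination
// CPU aDest could run, examining at most KMaxTries candidates and skipping anything
// that is running, frozen, migrating or barred by affinity.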
|
void TSubScheduler::IdlePullSearch(SIdlePullThread& a, TSubScheduler* aDest)
	{
	NSchedulable* orig = a.iS;
	TInt dcpu = aDest->iCpuNum;
	volatile TUint32& flags = *(volatile TUint32*)&aDest->iRescheduleNeededFlag;
	iReadyListLock.LockOnly();
	if (iRdyThreadCount>1)	// if there's only 1 it'll be running so leave it alone
		{
		TUint64 pres = iSSList.iPresent64;
		TInt tries = iRdyThreadCount;
		if (tries > KMaxTries)
			tries = KMaxTries;
		NSchedulable* q = 0;
		NSchedulable* p = 0;
		TInt pri = -1;
		for (; tries>0 && !flags; --tries)
			{
			if (p)
				{
				p = (NSchedulable*)(p->iNext);
				if (p == q)
					pri = -1;
				}
			if (pri<0)
				{
				pri = __e32_find_ms1_64(pres);
				if (pri < 0)
					break;
				pres &= ~(TUint64(1)<<pri);
				q = (NSchedulable*)iSSList.iQueue[pri];
				p = q;
				}
			NThreadBase* t = 0;
			if (p->iParent)
				t = (NThreadBase*)p;
			if (p->iCurrent)
				continue;	// running on other CPU so leave it alone
			if (p->iFreezeCpu)
				continue;	// can't run on this CPU - frozen to current CPU
			if (t && t->iCoreCycling)
				continue;	// currently cycling through cores so leave alone
			if (t && t->iHeldFastMutex && t->iLinkedObjType==NThreadBase::EWaitNone)
				continue;	// can't run on this CPU - fast mutex held
			if (p->iCpuChange)
				continue;	// already being migrated so leave it alone
			if (!CheckCpuAgainstAffinity(dcpu, p->iCpuAffinity))
				continue;	// can't run on this CPU - hard affinity
			if (p->iPreferredCpu & NSchedulable::EReadyCpuSticky)
				continue;	// don't want to move it on idle, only on periodic balance
			if (pri > a.iPri)
				{
				if (p->TakeRef())
					{
					a.iS = p;
					a.iPri = pri;
					break;
					}
				}
			}
		}
	iReadyListLock.UnlockOnly();
	if (orig && orig!=a.iS)
		a.iOld[iCpuNum] = orig;
	}

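// Idle entry point for a CPU: before sleeping, try to pull a ready thread from the
// other active CPUs (visited in a randomised order), then hand over to DoIdle().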
|
void NKern::Idle()
	{
	TScheduler& s = TheScheduler;
	TSubScheduler& ss0 = SubScheduler();	// OK since idle thread locked to CPU
	ss0.iCurrentThread->iSavedSP = 0;		// will become nonzero if a reschedule occurs
	TUint32 m0 = ss0.iCpuMask;
	volatile TUint32& flags = *(volatile TUint32*)&ss0.iRescheduleNeededFlag;
	if (s.iThreadAcceptCpus & m0)	// if this CPU is shutting down, don't try to pull threads
		{
		SIdlePullThread ipt;
		NKern::Lock();
		s.iIdleBalanceLock.LockOnly();
		TUint32 active = s.iThreadAcceptCpus;
		TUint32 srchm = active &~ m0;
		if (srchm && srchm!=active)
			{
			TUint32 randomizer = *(volatile TUint32*)&s.iIdleBalanceLock;
			TInt nact = __e32_bit_count_32(srchm);
			while (srchm)
				{
				TUint32 srchm2 = srchm;
				if (nact > 1)
					{
					randomizer = 69069*randomizer+41;
					TUint32 lose = randomizer % TUint32(nact);
					for (; lose; --lose)
						srchm2 = srchm2 & (srchm2-1);
					}
				TInt cpu = __e32_find_ls1_32(srchm2);
				TSubScheduler* ss = &TheSubSchedulers[cpu];
				ss->IdlePullSearch(ipt, &ss0);
				if (flags)
					break;
				srchm &= ~(1u<<cpu);
				--nact;
				}
			}
		s.iIdleBalanceLock.UnlockOnly();
		ipt.Finish(!srchm);
		NKern::Unlock();
		}
	DoIdle();
	}
