kernel/eka/nkernsmp/sched.cpp
branch RCL_3
changeset 44:3e88ff8f41d5
parent 43:c1f20ce4abcf
#include "nk_priv.h"
#include <nk_irq.h>

TSpinLock	NEventHandler::TiedLock(TSpinLock::EOrderEventHandlerTied);


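// KClassFromPriority buckets the 64 thread priorities into four coarse
// classes. Each TSubScheduler keeps a per-class count of ready threads
// (iPriClassThreadCount, maintained by the SS* functions below), and
// ReadyT() consults those counts when looking for the least-loaded CPU for
// a newly ready thread.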
const TUint8 KClassFromPriority[KNumPriorities] =
	{
	0,	0,	0,	0,	0,	0,	0,	0,				// priorities 0-7
	0,	0,	0,	0,	1,	1,	1,	1,				// priorities 8-15
	2,	2,	2,	2,	2,	2,	2,	2,				// priorities 16-23
	2,	2,	2,	3,	3,	3,	3,	3,				// priorities 24-31
	3,	3,	3,	3,	3,	3,	3,	3,				// priorities 32-39
	3,	3,	3,	3,	3,	3,	3,	3,				// priorities 40-47
	3,	3,	3,	3,	3,	3,	3,	3,				// priorities 48-55
	3,	3,	3,	3,	3,	3,	3,	3				// priorities 56-63
	};


/******************************************************************************
 * TScheduler
 ******************************************************************************/

// TScheduler resides in .bss so other fields are zero-initialised
TScheduler::TScheduler()
	:	iThreadAcceptCpus(1),	// only boot CPU for now
		iIpiAcceptCpus(1),		// only boot CPU for now
		iGenIPILock(TSpinLock::EOrderGenericIPIList),
		iIdleBalanceLock(TSpinLock::EOrderEnumerate),
		iIdleSpinLock(TSpinLock::EOrderIdleDFCList),
		iCpusNotIdle(1),	// only boot CPU for now
		iEnumerateLock(TSpinLock::EOrderEnumerate),
		iBalanceListLock(TSpinLock::EOrderReadyList),
		iBalanceTimer(&BalanceTimerExpired, this, 1),
		iCCSyncIDFC(&CCSyncDone, 0),
		iCCReactivateDfc(&CCReactivateDfcFn, this, 3),
		iCCRequestLevel(1),		// only boot CPU for now
		iCCRequestDfc(&CCRequestDfcFn, this, 2),
		iCCPowerDownDfc(&CCIndirectPowerDown, this, 0),
		iCCIpiReactIDFC(&CCIpiReactivateFn, this),
		iFreqChgDfc(&DoFrequencyChanged, this, 6)
	{
	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler* s = TheSubSchedulers + i;
		iSub[i] = s;
		s->iScheduler = this;
		s->iCpuNum = TUint32(i);
		s->iCpuMask = 1u<<i;
		s->iLbCounter = TUint8(NSchedulable::ELbState_PerCpu + i);
		}
	iLbCounter = (TUint8)NSchedulable::ELbState_Global;
	iNeedBal = 1;	// stop anyone trying to kick rebalancer before it has been created
	}


/** Return a pointer to the scheduler
	Intended for use by the crash debugger, not for general device driver use.

// ...

/******************************************************************************
 * TSubScheduler
 ******************************************************************************/

// TSubScheduler resides in .bss so other fields are zero-initialised
TSubScheduler::TSubScheduler()
	:	iExIDfcLock(TSpinLock::EOrderExIDfcQ),
		iReadyListLock(TSpinLock::EOrderReadyList),
		iKernLockCount(1),
		iEventHandlerLock(TSpinLock::EOrderEventHandlerList)
	{
	}

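// The SS* wrappers below keep the ready-thread statistics in step with the
// raw priority list (iSSList). A CPU's initial thread is deliberately left
// out of the counts, so iRdyThreadCount reflects only ordinary ready
// threads and groups.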
void TSubScheduler::SSAddEntry(NSchedulable* aEntry)
	{
	if (aEntry->iParent!=aEntry || !((NThreadBase*)aEntry)->i_NThread_Initial)
		{
		TInt c = KClassFromPriority[aEntry->iPriority];
		++iPriClassThreadCount[c];
		++iRdyThreadCount;
		}
	iSSList.Add(aEntry);
	}

void TSubScheduler::SSAddEntryHead(NSchedulable* aEntry)
	{
	if (aEntry->iParent!=aEntry || !((NThreadBase*)aEntry)->i_NThread_Initial)
		{
		TInt c = KClassFromPriority[aEntry->iPriority];
		++iPriClassThreadCount[c];
		++iRdyThreadCount;
		}
	iSSList.AddHead(aEntry);
	}

void TSubScheduler::SSRemoveEntry(NSchedulable* aEntry)
	{
	if (aEntry->iParent!=aEntry || !((NThreadBase*)aEntry)->i_NThread_Initial)
		{
		TInt c = KClassFromPriority[aEntry->iPriority];
		--iPriClassThreadCount[c];
		--iRdyThreadCount;
		}
	iSSList.Remove(aEntry);
	}

void TSubScheduler::SSChgEntryP(NSchedulable* aEntry, TInt aNewPriority)
	{
	if (aEntry->iParent!=aEntry || !((NThreadBase*)aEntry)->i_NThread_Initial)
		{
		TInt c0 = KClassFromPriority[aEntry->iPriority];
		TInt c1 = KClassFromPriority[aNewPriority];
		if (c0 != c1)
			{
			--iPriClassThreadCount[c0];
			++iPriClassThreadCount[c1];
			}
		}
	iSSList.ChangePriority(aEntry, aNewPriority);
	}


/******************************************************************************
 * NSchedulable
 ******************************************************************************/
TUint32 NSchedulable::PreprocessCpuAffinity(TUint32 aAffinity)
	{
	if (!(aAffinity & NTHREADBASE_CPU_AFFINITY_MASK))
		return aAffinity;
	TUint32 x = aAffinity & ~NTHREADBASE_CPU_AFFINITY_MASK;
	if (x & (x-1))
		return aAffinity;
	return __e32_find_ls1_32(x);
	}
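// A mask-style affinity (NTHREADBASE_CPU_AFFINITY_MASK set) that names
// exactly one CPU is folded to that CPU's plain number: x & (x-1) is zero
// only when a single bit is set, and __e32_find_ls1_32() returns the index
// of that bit.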

void NSchedulable::AcqSLock()
	{
	iSSpinLock.LockOnly();
	if (iParent!=this && iParent)
		iParent->AcqSLock();
// ...
		{
		__chill();
		}
	}


/** Return the total CPU time so far used by the specified thread.

	@return The total CPU time in units of 1/NKern::CpuTimeMeasFreq().
*/
EXPORT_C TUint64 NKern::ThreadCpuTime(NThread* aThread)
	{
	NSchedulable::SCpuStats stats;
	NKern::Lock();
	aThread->GetCpuStats(NSchedulable::E_RunTime, stats);
	NKern::Unlock();
	return stats.iRunTime;
	}
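// Usage sketch: the result is in ticks of NKern::CpuTimeMeasFreq(), so a
// caller could convert to microseconds along the lines of
//
//		TUint64 ticks = NKern::ThreadCpuTime(aThread);
//		TUint64 us = ticks * 1000000 / NKern::CpuTimeMeasFreq();
//
// (overflow of the 64-bit intermediate is ignored here for brevity).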

void NSchedulable::GetCpuStats(TUint aMask, NSchedulable::SCpuStats& aOut)
	{
	AcqSLock();
	GetCpuStatsT(aMask, aOut);
	RelSLock();
	}

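// GetCpuStatsT: iTotalCpuTime is only brought up to date on a reschedule,
// so for an entity that is currently running the time since its CPU's last
// reschedule timestamp (ss->iLastTimestamp) is added on. The *Delta fields
// report consumption since the previous query and refresh the saved
// baseline as a side effect.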
void NSchedulable::GetCpuStatsT(TUint aMask, NSchedulable::SCpuStats& aOut)
	{
	TSubScheduler* ss = 0;
	NThread* t = 0;
	TBool initial = FALSE;
	if (!IsGroup())
		t = (NThread*)this;
	if (t && t->i_NThread_Initial)
		ss = &TheSubSchedulers[iLastCpu], initial = TRUE;
	else if (iReady)
		{
		if (IsGroup())
			ss = &TheSubSchedulers[iReady & NSchedulable::EReadyCpuMask];
		else if (iParent->iReady)
			ss = &TheSubSchedulers[iParent->iReady & NSchedulable::EReadyCpuMask];
		}
	if (ss)
		ss->iReadyListLock.LockOnly();
	TUint64 now = NKern::Timestamp();
	if (aMask & (E_RunTime|E_RunTimeDelta))
		{
		aOut.iRunTime = iTotalCpuTime.i64;
		if (iCurrent || (initial && !ss->iCurrentThread))
			aOut.iRunTime += (now - ss->iLastTimestamp.i64);
		if (aMask & E_RunTimeDelta)
			{
			aOut.iRunTimeDelta = aOut.iRunTime - iSavedCpuTime.i64;
			iSavedCpuTime.i64 = aOut.iRunTime;
			}
		}
	if (aMask & (E_ActiveTime|E_ActiveTimeDelta))
		{
		aOut.iActiveTime = iTotalActiveTime.i64;
		if (iActiveState)
			aOut.iActiveTime += (now - iLastActivationTime.i64);
		if (aMask & E_ActiveTimeDelta)
			{
			aOut.iActiveTimeDelta = aOut.iActiveTime - iSavedActiveTime.i64;
			iSavedActiveTime.i64 = aOut.iActiveTime;
			}
		}
	if (aMask & E_LastRunTime)
		{
		if (iCurrent)
			aOut.iLastRunTime = 0;
		else
			aOut.iLastRunTime = now - iLastRunTime.i64;
		}
	if (aMask & E_LastActiveTime)
		{
		if (iActiveState)
			aOut.iLastActiveTime = 0;
		else
			aOut.iLastActiveTime = now - iLastRunTime.i64;
		}
	if (ss)
		ss->iReadyListLock.UnlockOnly();
	}


/******************************************************************************
 * NThreadGroup
 ******************************************************************************/


// ...

void NSchedulable::ReadyT(TUint aMode)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NSchedulable::ReadyT");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nReadyT(%x)",this,aMode));
	NThreadBase* t = (NThreadBase*)this;
	if (iParent && !iActiveState)
		{
		iActiveState=1;
		iLastActivationTime.i64 = NKern::Timestamp();
		if (iParent!=this && ++iParent->iActiveState==1)
			iParent->iLastActivationTime.i64 = iLastActivationTime.i64;
		}
#ifdef _DEBUG
	if (!iParent)
		t = (NThreadBase*)0xface0fff;
#endif
	__NK_ASSERT_DEBUG(!iReady && (!iParent || (!t->iWaitState.iWtC.iWtStFlags && !t->iSuspended)));
	TSubScheduler& ss0 = SubScheduler();
	TScheduler& s = TheScheduler;
	TBool reactivate = FALSE;
	TBool no_ipi = FALSE;
	NSchedulable* g = this;
	if (iParent != this && iParent)
		{
		NThreadGroup* tg = (NThreadGroup*)iParent;
		iReady = EReadyGroup;
// ...
			TInt gp = tg->iPriority;
			TSubScheduler& ss = TheSubSchedulers[tg->iReady & EReadyCpuMask];
			ss.iReadyListLock.LockOnly();
			TInt hp = ss.HighestPriority();
			if (iPriority>gp)
				{
				ss.SSChgEntryP(tg, iPriority);
				}
			if (iPriority>hp || (iPriority==hp && ss.iCurrentThread && ss.iCurrentThread->iTime==0))
				{
				if (&ss == &ss0)
					RescheduleNeeded();					// reschedule on this processor
				else
					ss0.iReschedIPIs |= ss.iCpuMask;	// will kick the other CPU when this CPU reenables preemption
				}
			if ((aMode & ENewTimeslice) && t->iTime==0 && (iNext!=this || ss.EntryAtPriority(iPriority)) )
				t->iTime = t->iTimeslice;
			ss.iReadyListLock.UnlockOnly();

			ss0.iMadeReadyCounter++;
			return;
			}
		tg->iNThreadList.Add(this);
		tg->iPriority = iPriority;	// first in group
		g = tg;						// fall through to add group to subscheduler
		}
	TInt priClass = -1;
	TInt cpu = -1;
	TUint32 active = TheScheduler.iThreadAcceptCpus;
	if (g!=t || !t->i_NThread_Initial)
		priClass = KClassFromPriority[g->iPriority];
	if (g->iForcedCpu)
		{
		cpu = iForcedCpu & EReadyCpuMask;	// handles core cycling case (No.1 below)
		if (active & (1u<<cpu))
			goto cpu_ok;
		else
			goto single_cpu_reactivate;
		}
	if (aMode & EUnPause)
		{
		cpu = (g->iEventState & EThreadCpuMask)>>EThreadCpuShift;
		if (CheckCpuAgainstAffinity(cpu, g->iCpuAffinity))
			goto cpu_ok;
		cpu = -1;
		}
	if (g->iFreezeCpu)
		{
		cpu = g->iLastCpu;
		goto cpu_ok;
		}
	if (!(g->iCpuAffinity & NTHREADBASE_CPU_AFFINITY_MASK))
		{
		cpu = g->iCpuAffinity;
		if (!(active & (1u<<cpu)))
			goto single_cpu_reactivate;
		goto cpu_ok;
		}
	if ((aMode & EPreferSameCpu) && CheckCpuAgainstAffinity(ss0.iCpuNum, g->iCpuAffinity, active))
		cpu = ss0.iCpuNum;
	else if (iTransientCpu && CheckCpuAgainstAffinity(iTransientCpu & EReadyCpuMask, g->iCpuAffinity))
		cpu = iTransientCpu & EReadyCpuMask;
	else if (iPreferredCpu && CheckCpuAgainstAffinity(iPreferredCpu & EReadyCpuMask, g->iCpuAffinity, active))
		cpu = iPreferredCpu & EReadyCpuMask;
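	// None of the fast cases above produced a CPU (forced CPU, unpause back
	// to the event CPU, frozen CPU, single-CPU affinity, same/transient/
	// preferred CPU), so fall through to scanning the active CPUs.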
	if (cpu < 0)
		{
		// pick a cpu
		TUint32 m = g->iCpuAffinity & active;
		TInt lastCpu = g->iLastCpu;
		TInt i = lastCpu;
		TInt lcp = KMaxTInt;
		TInt lco = KMaxTInt;
		TInt cpunp = -1;
		TInt idle_cpu = -1;
		do	{
			if (m & (1u<<i))
				{
				TSubScheduler& ss = *s.iSub[i];
				TInt nInC = ss.iPriClassThreadCount[priClass];
				if (nInC < lco)
					lco=nInC, cpunp=i;
				TInt hp = ss.HighestPriority();
				if (idle_cpu<0 && hp<=0)
					idle_cpu = i;
				if (hp < iPriority)
					{
					if (i == lastCpu)
						{
						cpu = i;
						if (hp <= 0)
							break;
						lcp = -1;
						}
					if (nInC < lcp)
						lcp=nInC, cpu=i;
					}
				}
			if (++i == s.iNumCpus)
				i = 0;
			} while (i != lastCpu);
		if (idle_cpu>=0 && cpu!=idle_cpu)
			cpu = idle_cpu;
		else if (cpu<0)
			cpu = cpunp;
		}
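	// The rotor scan above starts from the thread's last CPU so that, all
	// else being equal, cache-warm placement wins: preferred is a CPU whose
	// highest ready priority is below this thread's (fewest same-class ready
	// threads breaks ties), a fully idle CPU beats everything, and cpunp
	// (least same-class load anywhere) is the fallback.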
	if (cpu<0)
		{
single_cpu_reactivate:
		/*	CORE_CONTROL
			Might have no CPU at this point due to all CPUs specified by
			iCpuAffinity being off or in the process of shutting down.
			There are three possibilities:
			1.	This thread is 'core cycling'. In that case it will be
				allowed to move to a 'shutting down' CPU. The CPU will
				not be permitted to shut down entirely until all core cycling
				has completed. This is already handled above.
			2.	There are one or more CPUs which this thread could run on which
				are shutting down. In that case, pick one, abort the shutdown
				process and put this thread on it.
			3.	All CPUs which this thread can run on are off. In that case,
				assign the thread to one of them and initiate power up of that core.
		*/
		TUint32 affm = AffinityToMask(g->iCpuAffinity);
		TInt irq = s.iGenIPILock.LockIrqSave();
		if (cpu < 0)
			{
			if (affm & s.iCCReactivateCpus)
				cpu = __e32_find_ls1_32(affm & s.iCCReactivateCpus);
			else if (affm & s.iIpiAcceptCpus)
				cpu = __e32_find_ls1_32(affm & s.iIpiAcceptCpus);
			else
				cpu = __e32_find_ls1_32(affm), no_ipi = TRUE;
			}
		TUint32 cm = 1u<<cpu;
		if (!((s.iCCReactivateCpus|s.iThreadAcceptCpus) & cm))
			{
			s.iCCReactivateCpus |= (1u<<cpu);
			reactivate = TRUE;
			}
		s.iGenIPILock.UnlockIrqRestore(irq);
		}
cpu_ok:
	__NK_ASSERT_ALWAYS(cpu>=0);
	if (g->iFreezeCpu && !CheckCpuAgainstAffinity(cpu, g->iCpuAffinity))
		g->iCpuChange = TRUE;
	if (g->TiedEventReadyInterlock(cpu))
		{
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("ReadyT->CPU %dD",cpu));
		++g->iPauseCount;
		}
	else
		{
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("ReadyT->CPU %d",cpu));
		TSubScheduler& ss = TheSubSchedulers[cpu];
		ss.iReadyListLock.LockOnly();
		TInt hp = ss.HighestPriority();
		if (g->iPriority>hp || (g->iPriority==hp && ss.iCurrentThread && ss.iCurrentThread->iTime==0))
			{
			if (&ss == &ss0)
				RescheduleNeeded();					// reschedule on this processor
			else if (!no_ipi)
				ss0.iReschedIPIs |= ss.iCpuMask;	// will kick the other CPU when this CPU reenables preemption
			}
		ss.SSAddEntry(g);
		g->iReady = TUint8(cpu | EReadyOffset);
		if ((aMode & ENewTimeslice) && iParent && t->iTime==0 && g->iNext!=g)
			t->iTime = t->iTimeslice;
		if (!g->iLbLink.iNext && !(g->iParent && t->i_NThread_Initial))
			{
			ss.iLbQ.Add(&g->iLbLink);
			g->iLbState = ss.iLbCounter;
			if (!s.iNeedBal && (!g->iParent || !(t->iRebalanceAttr & 1)))
				{
				s.iNeedBal = 1;
				reactivate = TRUE;
				}
			}
		if (g->iForcedCpu == g->iReady)
			{
			g->iLastCpu = (TUint8)cpu;
			g->iForcedCpu = 0;	// iForcedCpu has done its job - iFreezeCpu will keep the thread on the right CPU
			}
		ss.iReadyListLock.UnlockOnly();
		ss0.iMadeReadyCounter++;
		}
	if (reactivate)
		s.iCCReactivateDfc.Add();
	}


NThread* TSubScheduler::SelectNextThread()
	{
// ...
		// ASSUMPTION: If iNewParent set, ot can't hold a fast mutex (assertion in JoinGroup)
		TBool pfmd = (ot->iParent!=ot && !ot->iFastMutexDefer);
		if (ot->iTime==0 || pfmd)
			{
			// ot's timeslice has expired
			ot->iParent->iTransientCpu = 0;
			fmd_res = ot->CheckFastMutexDefer();
			fmd_done = TRUE;
			if (fmd_res)
				{
				if (ot->iTime == 0)
// ...
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T WS: %02x %02x (%08x) P:%02x S:%1x", ot, 
							ot->iWaitState.iWtC.iWtStFlags, ot->iWaitState.iWtC.iWtObjType, ot->iWaitState.iWtC.iWtObj, ot->iPauseCount, ot->iSuspended));
		TInt wtst = ot->iWaitState.DoWait();
		if (wtst>=0 && wtst!=NThread::EWaitFastMutex)
			ot->iTime = ot->iTimeslice;
		if (wtst==KErrDied || ot->iSuspended || (!(ot->iWaitState.iWtC.iWtStFlags & NThreadWaitState::EWtStObstructed) && wtst>=0) )
			{
			ot->iActiveState = 0;
			ot->iParent->iTransientCpu = 0;
			if (ot->iParent != ot)
				--ot->iParent->iActiveState;
			}
		ot->UnReadyT();
		if (ot->iNewParent)
			{
			ot->iParent = ot->iNewParent, ++((NThreadGroup*)ot->iParent)->iThreadCount;
			wmb();	// must make sure iParent is updated before iNewParent is cleared
			ot->iNewParent = 0;
			if (ot->iActiveState && ++ot->iParent->iActiveState==1)
				ot->iParent->iLastActivationTime.i64 = NKern::Timestamp();
			}
		ot->iCpuChange = FALSE;
		}
	else if (ot->iNewParent)
		{
// ...
		ot->iParent = ot->iNewParent;
		ot->iCpuChange = FALSE;
		++((NThreadGroup*)ot->iParent)->iThreadCount;
		wmb();	// must make sure iParent is updated before iNewParent is cleared
		ot->iNewParent = 0;
		TUint64 now = NKern::Timestamp();
		if (!ot->iParent->iCurrent)
			ot->iParent->iLastStartTime.i64 = now;
		if (++ot->iParent->iActiveState==1)
			ot->iParent->iLastActivationTime.i64 = now;
		}
	else if (ot->iParent->iCpuChange)
		{
		if (ot->iForcedCpu)
			migrate = TRUE;
		else if (!ot->iParent->iFreezeCpu)
			{
			if (ot->iParent->ShouldMigrate(iCpuNum))
				{
				if (ot->iParent==ot)
					{
					if (!fmd_done)
						fmd_res = ot->CheckFastMutexDefer(), fmd_done = TRUE;
					if (!fmd_res)
						migrate = TRUE;
					}
				else
					gmigrate = TRUE;
				}
			else
				{
				ot->iCpuChange = FALSE;
				ot->iParent->iCpuChange = FALSE;
				}
			}
		if (migrate)
			{
			__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T A:%08x",ot,ot->iParent->iCpuAffinity));
			ot->UnReadyT();
			ot->iCpuChange = FALSE;
			}
		else if (gmigrate)
			{
			__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T GA:%08x",ot,ot->iParent->iCpuAffinity));
			SSRemoveEntry(ot->iParent);
			ot->iParent->iReady = 0;
			ot->iCpuChange = FALSE;
			ot->iParent->iCpuChange = FALSE;
			}
		}
no_ot:
	NSchedulable* g = (NSchedulable*)iSSList.First();
	TBool rrcg = FALSE;
	if (g && g->IsGroup())
		{
		t = (NThread*)((NThreadGroup*)g)->iNThreadList.First();
		if (g->iNext!=g)
// ...
		t = (NThread*)g;
	TBool rrct = (t && t->iNext!=t);
	if (t && t->iTime==0 && (rrcg || rrct))
		{
		// candidate thread's timeslice has expired and there is another at the same priority

		iTimeSliceExpireCounter++; // update metric

		if (t==ot)
			{
			if (ot->iParent!=ot)
				{
				((NThreadGroup*)ot->iParent)->iNThreadList.iQueue[ot->iPriority] = ot->iNext;
				iSSList.iQueue[ot->iParent->iPriority] = ot->iParent->iNext;
				}
			else
				iSSList.iQueue[ot->iPriority] = ot->iNext;
			ot->iTime = ot->iTimeslice;
			NSchedulable* g2 = (NSchedulable*)iSSList.First();
			if (g2->IsGroup())
				t = (NThread*)((NThreadGroup*)g2)->iNThreadList.First();
			else
				t = (NThread*)g2;
			if (t->iTime==0)
// ...
				}
			else
				{
				__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T RR",ot));
				}
			}
		else	// loop again since we need to lock t before round robining it
			{
			__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T LL",ot));
			iRescheduleNeededFlag = TRUE;
// ...
		ot->ReadyT(NThreadBase::ENewTimeslice);	// new timeslice if it's queued behind another thread at same priority
	if (gmigrate)
		ot->iParent->ReadyT(0);	// new timeslice if it's queued behind another thread at same priority
	if (ot)
		{
		TBool dead = ot->iWaitState.ThreadIsDead();
		if (dead && ot->iLbLink.iNext)
			ot->LbUnlink();
		ot->RelSLock();

		// DFC to signal thread is now dead
		if (dead && ot->iWaitState.iWtC.iKillDfc && __e32_atomic_tau_ord8(&ot->iACount, 1, 0xff, 0)==1)
			{
			ot->RemoveFromEnumerateList();
			ot->iWaitState.iWtC.iKillDfc->DoEnque();
			}
		}
	if (iCCSyncPending)
		{
		iCCSyncPending = 0;
		iReschedIPIs |= 0x80000000u;		// update iCCSyncCpus when kernel is finally unlocked
		}
	__KTRACE_OPT(KSCHED,DEBUGPRINT("Rschd->%T",t));
	__NK_ASSERT_ALWAYS(!t || t->iParent);	// must be a thread not a group
	return t;	// could return NULL
	}

void NSchedulable::LbUnlink()
	{
	if (iLbState & ELbState_PerCpu)
		{
		TSubScheduler* ss = &TheSubSchedulers[iLbState & ELbState_CpuMask];
		ss->iReadyListLock.LockOnly();
		if (iLbState == ss->iLbCounter)
			{
			iLbLink.Deque();
			iLbLink.iNext = 0;
			iLbState = ELbState_Inactive;
			}
		ss->iReadyListLock.UnlockOnly();
		}
	else if ((iLbState & ELbState_CpuMask) == ELbState_Global)
		{
		TScheduler& s = TheScheduler;
		s.iBalanceListLock.LockOnly();
		if (iLbState == s.iLbCounter)
			{
			iLbLink.Deque();
			iLbLink.iNext = 0;
			iLbState = ELbState_Inactive;
			}
		s.iBalanceListLock.UnlockOnly();
		}
	if (iLbState != ELbState_Inactive)
		{
		// load balancer is running so we can't dequeue the thread
		iLbState |= ELbState_ExtraRef;				// indicates extra ref has been taken
		__e32_atomic_tau_ord8(&iACount, 1, 1, 0);	// extra ref will be removed by load balancer
		}
	}

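// Reference counting sketch: iACount is an 8-bit count manipulated with
// __e32_atomic_tau_ord8(addr,1,u,v), which adds u if the current value is
// >=1 and v otherwise, returning the old value. TakeRef() therefore fails
// (returns 0) once the count has already dropped to zero, and in DropRef()
// the 0xff addend is -1 mod 256, an old value of 1 marking the final
// reference.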
TBool NSchedulable::TakeRef()
	{
	return __e32_atomic_tau_ord8(&iACount, 1, 1, 0);
	}

TBool NSchedulable::DropRef()
	{
	if (__e32_atomic_tau_ord8(&iACount, 1, 0xff, 0)!=1)
		return EFalse;
	TDfc* d = 0;
	AcqSLock();
	if (iParent)
		{
		// it's a thread
		NThreadBase* t = (NThreadBase*)this;
		if (t->iWaitState.ThreadIsDead() && t->iWaitState.iWtC.iKillDfc)
			d = t->iWaitState.iWtC.iKillDfc;
		RelSLock();
		t->RemoveFromEnumerateList();
		}
	else
		{
		NThreadGroup* g = (NThreadGroup*)this;
		d = g->iDestructionDfc;
		RelSLock();
		g->RemoveFromEnumerateList();
		}
	if (d)
		d->DoEnque();
	return ETrue;
	}

void NSchedulable::RemoveFromEnumerateList()
	{
	TScheduler& s = TheScheduler;
	s.iEnumerateLock.LockOnly();
	if (iEnumerateLink.Next())
		{
		iEnumerateLink.Deque();
		iEnumerateLink.SetNext(0);
		}
	s.iEnumerateLock.UnlockOnly();
	}

void NThreadBase::UnReadyT()
	{
	if (iParent!=this)
		{
// ...
		if (g.iReady)
			{
			TSubScheduler& ss = TheSubSchedulers[g.iReady & EReadyCpuMask];
			if (l.IsEmpty())
				{
				ss.SSRemoveEntry(&g);
				g.iReady = 0;
				g.iPriority = 0;
				}
			else
				{
				TInt np = l.HighestPriority();
				ss.SSChgEntryP(&g, np);
				}
			}
		}
	else
		{
		TSubScheduler& ss = TheSubSchedulers[iReady & EReadyCpuMask];
		ss.SSRemoveEntry(this);
		}
	iReady = 0;

	SubScheduler().iMadeUnReadyCounter++;
	}


void NThreadBase::ChangeReadyThreadPriority()
	{
// ...
		tg->iNThreadList.ChangePriority(this, newp);
		if (ss)
			{
			TInt ngp = tg->iNThreadList.HighestPriority();
			if (ngp!=tg->iPriority)
				ss->SSChgEntryP(tg, ngp);
			}
		}
	else
		ss->SSChgEntryP(this, newp);
	if (iCurrent)	// can't be current if parent not ready
		{
		TInt nhp = ss->HighestPriority();
		if (newp<oldp && (newp<nhp || (newp==nhp && iTime==0)))
			resched = TRUE;
// ...
 */
EXPORT_C void NThreadBase::SetPriority(TInt newp)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::SetPriority");
	AcqSLock();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetPri(%d) PBNM[%d,%d,%d,%d]",this,newp,iPriority,iBasePri,iNominalPri,iMutexPri));
	iBasePri = TUint8(newp);
	if (iMutexPri > newp)
		newp = iMutexPri;
	TInt oldp = iPriority;
	if (newp == oldp)
		{
		RelSLock();
// ...
		if (t)
			t->SetMutexPriority(wfm);
		wfm->iMutexLock.UnlockOnly();
		}
	}

void NThreadBase::SetNominalPriority(TInt newp)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::SetNominalPriority");
	AcqSLock();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetNPr(%d) PBNM[%d,%d,%d,%d]",this,newp,iPriority,iBasePri,iNominalPri,iMutexPri));
	iNominalPri = TUint8(newp);
	NominalPriorityChanged();
	RelSLock();
	}



/** Set the inherited priority of a nanokernel thread.

	@pre	Kernel must be locked.
// ...
		newp = hp;
		g = tg;
		}
	if (newp <= ss->HighestPriority())
		RescheduleNeeded();
	ss->SSChgEntryP(g, newp);
out:
	ss->iReadyListLock.UnlockOnly();
	}


/******************************************************************************
 * Pull threads on idle
 ******************************************************************************/

const TInt KMaxTries = 4;

struct SIdlePullThread
	{
	SIdlePullThread();
	void Finish(TBool aDone);

	NSchedulable*	iS;
	TInt			iPri;
	NSchedulable*	iOld[KMaxCpus];
	};

SIdlePullThread::SIdlePullThread()
	{
	iS = 0;
	iPri = 0;
	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		iOld[i] = 0;
	}

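// When a CPU idles, NKern::Idle() (below) scans the other active CPUs for a
// thread worth pulling. SIdlePullThread carries the best candidate so far
// (iS at priority iPri); iOld[] keeps candidates that were superseded after
// their reference had been taken, so Finish() can drop every reference and,
// on a complete search, pull the winner here via a transient affinity.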
void SIdlePullThread::Finish(TBool aComplete)
	{
	if (aComplete && iS)
		{
		iS->AcqSLock();
		iS->SetCpuAffinityT(NKern::CurrentCpu() | KCpuAffinityTransient);
		iS->RelSLock();
		}
	if (iS)
		iS->DropRef();
	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		if (iOld[i])
			iOld[i]->DropRef();
	}

void TSubScheduler::IdlePullSearch(SIdlePullThread& a, TSubScheduler* aDest)
	{
	NSchedulable* orig = a.iS;
	TInt dcpu = aDest->iCpuNum;
	volatile TUint32& flags = *(volatile TUint32*)&aDest->iRescheduleNeededFlag;
	iReadyListLock.LockOnly();
	if (iRdyThreadCount>1)	// if there's only 1 it'll be running so leave it alone
		{
		TUint64 pres = iSSList.iPresent64;
		TInt tries = iRdyThreadCount;
		if (tries > KMaxTries)
			tries = KMaxTries;
		NSchedulable* q = 0;
		NSchedulable* p = 0;
		TInt pri = -1;
		for (; tries>0 && !flags; --tries)
			{
			if (p)
				{
				p = (NSchedulable*)(p->iNext);
				if (p == q)
					pri = -1;
				}
			if (pri<0)
				{
				pri = __e32_find_ms1_64(pres);
				if (pri < 0)
					break;
				pres &= ~(TUint64(1)<<pri);
				q = (NSchedulable*)iSSList.iQueue[pri];
				p = q;
				}
			NThreadBase* t = 0;
			if (p->iParent)
				t = (NThreadBase*)p;
			if (p->iCurrent)
				continue;	// running on other CPU so leave it alone
			if (p->iFreezeCpu)
				continue;	// can't run on this CPU - frozen to current CPU
			if (t && t->iCoreCycling)
				continue;	// currently cycling through cores so leave alone
			if (t && t->iHeldFastMutex && t->iLinkedObjType==NThreadBase::EWaitNone)
				continue;	// can't run on this CPU - fast mutex held
			if (p->iCpuChange)
				continue;	// already being migrated so leave it alone
			if (!CheckCpuAgainstAffinity(dcpu, p->iCpuAffinity))
				continue;	// can't run on this CPU - hard affinity
			if (p->iPreferredCpu & NSchedulable::EReadyCpuSticky)
				continue;	// don't want to move it on idle, only on periodic balance
			if (pri > a.iPri)
				{
				if (p->TakeRef())
					{
					a.iS = p;
					a.iPri = pri;
					break;
					}
				}
			}
		}
	iReadyListLock.UnlockOnly();
	if (orig && orig!=a.iS)
		a.iOld[iCpuNum] = orig;
	}

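// NKern::Idle: before sleeping, an idle CPU tries to pull work from the
// other CPUs that accept threads. The 69069*x+41 linear congruential step
// varies the visiting order so concurrent idle CPUs do not all descend on
// the same victim; srchm2 & (srchm2-1) clears the lowest set bit, so 'lose'
// iterations pick a pseudo-random member of the remaining mask.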
void NKern::Idle()
	{
	TScheduler& s = TheScheduler;
	TSubScheduler& ss0 = SubScheduler();	// OK since idle thread locked to CPU
	ss0.iCurrentThread->iSavedSP = 0;		// will become nonzero if a reschedule occurs
	TUint32 m0 = ss0.iCpuMask;
	volatile TUint32& flags = *(volatile TUint32*)&ss0.iRescheduleNeededFlag;
	if (s.iThreadAcceptCpus & m0)			// if this CPU is shutting down, don't try to pull threads
		{
		SIdlePullThread ipt;
		NKern::Lock();
		s.iIdleBalanceLock.LockOnly();
		TUint32 active = s.iThreadAcceptCpus;
		TUint32 srchm = active &~ m0;
		if (srchm && srchm!=active)
			{
			TUint32 randomizer = *(volatile TUint32*)&s.iIdleBalanceLock;
			TInt nact = __e32_bit_count_32(srchm);
			while (srchm)
				{
				TUint32 srchm2 = srchm;
				if (nact > 1)
					{
					randomizer = 69069*randomizer+41;
					TUint32 lose = randomizer % TUint32(nact);
					for (; lose; --lose)
						srchm2 = srchm2 & (srchm2-1);
					}
				TInt cpu = __e32_find_ls1_32(srchm2);
				TSubScheduler* ss = &TheSubSchedulers[cpu];
				ss->IdlePullSearch(ipt, &ss0);
				if (flags)
					break;
				srchm &= ~(1u<<cpu);
				--nact;
				}
			}
		s.iIdleBalanceLock.UnlockOnly();
		ipt.Finish(!srchm);
		NKern::Unlock();
		}
	DoIdle();
	}