kernel/eka/nkernsmp/nkern.cpp
branch RCL_3
changeset 256 c1f20ce4abcf
parent 0 a41df078684a
child 257 3e88ff8f41d5
comparing 249:a179b74831c9 with 256:c1f20ce4abcf
@@ -96,11 +96,14 @@
 			// this forces priority changes to wait for the mutex lock
 			pC->iLinkedObjType = NThreadBase::EWaitFastMutex;
 			pC->iLinkedObj = this;
 			pC->iWaitState.SetUpWait(NThreadBase::EWaitFastMutex, NThreadWaitState::EWtStObstructed, this);
 			pC->iWaitLink.iPriority = pC->iPriority;
-			iWaitQ.Add(&pC->iWaitLink);
+			if (waited)
+				iWaitQ.AddHead(&pC->iWaitLink);	// we were next at this priority
+			else
+				iWaitQ.Add(&pC->iWaitLink);
 			pC->RelSLock();
 			if (pH)
 				pH->SetMutexPriority(this);
 do_pause:
 			iMutexLock.UnlockOnly();
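
The new `waited` branch preserves FIFO fairness within a priority band of the mutex wait queue: a thread that had already reached the front of its band when the mutex was snatched from it re-enters at the head rather than the tail, so it stays next in line. A minimal model of the assumed Add/AddHead semantics (illustrative only; the kernel's iWaitQ is a TPriList, not this sketch):

// Model of a priority-banded FIFO wait queue (assumed semantics of
// TPriList::Add/AddHead; not the kernel implementation).
#include <deque>
#include <map>

struct WaitQueueModel
	{
	std::map<int, std::deque<int> > iBands;	// priority -> FIFO band of thread ids

	void Add(int aPri, int aTid)		{ iBands[aPri].push_back(aTid); }	// first wait: join at the back
	void AddHead(int aPri, int aTid)	{ iBands[aPri].push_front(aTid); }	// re-queue: stay at the front
	};
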
@@ -878,10 +881,13 @@
 	else
 		{
 		iCsFunction = ECSDivertPending;
 		iSuspendCount = 0;
 		iSuspended = 0;
+
+		// If thread is killed before first resumption, set iACount=1
+		__e32_atomic_tau_ord8(&iACount, 1, 0, 1);
 		if (aS)
 			aS->iReadyListLock.UnlockOnly();
 		DoReleaseT(KErrDied,0);
 		if (!iReady && !iPauseCount)
 			ReadyT(0);
@@ -892,11 +898,11 @@
 // If aCount>=0 suspend the thread aCount times
 // If aCount<0 kill the thread
 TBool NThreadBase::SuspendOrKill(TInt aCount)
 	{
 	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSuspendOrKill %d", this, aCount));
-	if (aCount==0)
+	if (aCount==0 || i_NThread_Initial)
 		return FALSE;
 	TBool result = FALSE;
 	TBool concurrent = FALSE;
 	TSubScheduler* ss = 0;
 	AcqSLock();
@@ -1045,10 +1051,14 @@
 			--iSuspendCount;
 		if (!iSuspendCount)
 			{
 			result = TRUE;
 			iSuspended = 0;
+
+			// On first resumption set iACount=1
+			// From then on the thread must be killed before being deleted
+			__e32_atomic_tau_ord8(&iACount, 1, 0, 1);
 			if (!iPauseCount && !iReady && !iWaitState.iWtC.iWtStFlags)
 				ReadyT(0);
 			}
 		}
 
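
Both uses of `__e32_atomic_tau_ord8(&iACount, 1, 0, 1)` implement a one-shot latch: assuming the usual e32 test-and-add-unsigned semantics (if `*a >= t` add `u`, else add `v`, returning the old value), the call takes iACount from 0 to 1 the first time and leaves it at 1 thereafter, so the "has ever been resumed or killed" state is recorded exactly once. A single-threaded sketch of the assumed semantics:

// Non-atomic model of __e32_atomic_tau_ord8 (the real routine performs this
// as one atomic step with full ordering); semantics assumed from the e32
// atomics naming convention: t = threshold, u added if *a >= t, else v.
unsigned char ModelTau8(unsigned char* a, unsigned char t, unsigned char u, unsigned char v)
	{
	unsigned char old = *a;
	*a = (unsigned char)(old >= t ? old + u : old + v);
	return old;
	}

// ModelTau8(&iACount, 1, 0, 1): 0 -> 1 on the first call, then 1 -> 1 forever.
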
@@ -1210,10 +1220,13 @@
 	TDfc* pD = NULL;
 	NThreadExitHandler xh = iHandlers->iExitHandler;
 	if (xh)
 		pD = (*xh)((NThread*)this);		// call exit handler
 
+	// if CPU freeze still active, remove it
+	NKern::EndFreezeCpu(0);
+
 	// detach any tied events
 	DetachTiedEvents();
 
 	NKern::LeaveGroup();	// detach from group if exit handler didn't do it
 
@@ -1281,60 +1294,149 @@
 
 	@param	aAffinity The number of the CPU to which this thread should be locked, or
 			KCpuAny if it should be able to run on any CPU.
 	@return The previous affinity mask.
 */
-TUint32 NThreadBase::SetCpuAffinity(TUint32 aAffinity)
+TUint32 NSchedulable::SetCpuAffinityT(TUint32 aAffinity)
 	{
 	// check aAffinity is valid
-	AcqSLock();
-	TUint32 old_aff = iParent->iCpuAffinity;
-	TBool migrate = FALSE;
+	NThreadBase* t = 0;
+	NThreadGroup* g = 0;
+	NSchedulable* p = iParent;
+	if (!p)
+		g = (NThreadGroup*)this, p=g;
+	else
+		t = (NThreadBase*)this;
+	if (iParent && iParent!=this)
+		g = (NThreadGroup*)iParent;
+	TUint32 old_aff = p->iCpuAffinity;
 	TBool make_ready = FALSE;
 	TSubScheduler* ss0 = &SubScheduler();
 	TSubScheduler* ss = 0;
-	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetCpu %08x->%08x, F:%d R:%02x PR:%02x",this,iParent->iCpuAffinity,aAffinity,iParent->iFreezeCpu,iReady,iParent->iReady));
-	if (i_NThread_Initial)
+#ifdef KNKERN
+	if (iParent)
+		{
+		__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetCpu %08x->%08x, F:%d R:%02x PR:%02x",this,iParent->iCpuAffinity,aAffinity,iParent->iFreezeCpu,iReady,iParent->iReady));
+		}
+	else
+		{
+		__KTRACE_OPT(KNKERN,DEBUGPRINT("%G nSetCpu %08x->%08x, F:%d R:%02x",this,iCpuAffinity,aAffinity,iFreezeCpu,iReady));
+		}
+#endif
+	if (t && t->i_NThread_Initial)
 		goto done;	// can't change affinity of initial thread
-	iParent->iCpuAffinity = aAffinity;		// set new affinity, might not take effect yet
-	if (!iParent->iReady)
+	if (aAffinity == NTHREADBASE_CPU_AFFINITY_MASK)
+		{
+		p->iTransientCpu = 0;
+		}
+	else if ( (aAffinity & (KCpuAffinityPref|NTHREADBASE_CPU_AFFINITY_MASK)) == KCpuAffinityPref)
+		{
+		p->iTransientCpu = 0;
+		p->iPreferredCpu = TUint8((aAffinity & (EReadyCpuMask|EReadyCpuSticky)) | EReadyOffset);
+		}
+	else if ( (aAffinity & (KCpuAffinityTransient|KCpuAffinityPref|NTHREADBASE_CPU_AFFINITY_MASK)) == KCpuAffinityTransient)
+		{
+		p->iTransientCpu = TUint8(aAffinity & EReadyCpuMask) | EReadyOffset;
+		}
+	else
+		p->iCpuAffinity = NSchedulable::PreprocessCpuAffinity(aAffinity);		// set new affinity, might not take effect yet
+	if (!p->iReady)
 		goto done;	// thread/group not currently on a ready list so can just change affinity
-	migrate = !CheckCpuAgainstAffinity(iParent->iReady & EReadyCpuMask, aAffinity);	// TRUE if thread's current CPU is incompatible with the new affinity
-	if (!migrate)
+
+	// Check if the thread needs to migrate or can stay where it is
+	if (!p->ShouldMigrate(p->iReady & EReadyCpuMask))
 		goto done;	// don't need to move thread, so just change affinity
-	ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
+	ss = TheSubSchedulers + (p->iReady & EReadyCpuMask);
 	ss->iReadyListLock.LockOnly();
-	if (iParent->iCurrent)
+	if (p->iCurrent)
 		{
-		iParent->iCpuChange = TRUE;			// mark CPU change pending
+		p->iCpuChange = TRUE;			// mark CPU change pending
 		if (ss == ss0)
 			RescheduleNeeded();
 		else
 			// kick other CPU now so migration happens before acquisition of fast mutex
-			send_resched_ipi_and_wait(iParent->iReady & EReadyCpuMask);
+			send_resched_ipi_and_wait(p->iReady & EReadyCpuMask);
 		}
 	else
 		{
 		// Note: Need to know here if any thread in the group would return TRUE from CheckFastMutexDefer()
 		// This is handled by the scheduler - when a thread belonging to a group is context switched
 		// out while holding a fast mutex its iFastMutexDefer is set to 1 and the group's iFreezeCpu
 		// is incremented.
-		if (iParent->iFreezeCpu || (iParent==this && CheckFastMutexDefer()))
-			iParent->iCpuChange = TRUE;	// CPU frozen or fast mutex held so just mark deferred CPU migration
+		if (p->iFreezeCpu || (iParent==this && t->CheckFastMutexDefer()))
+			p->iCpuChange = TRUE;	// CPU frozen or fast mutex held so just mark deferred CPU migration
 		else
 			{
-			ss->Remove(iParent);
-			iParent->iReady = 0;
+			ss->SSRemoveEntry(p);
+			p->iReady = 0;
 			make_ready = TRUE;
 			}
 		}
 	ss->iReadyListLock.UnlockOnly();
 	if (make_ready)
-		iParent->ReadyT(0);
+		p->ReadyT(0);
 done:
-	RelSLock();
 	return old_aff;
+	}
+
+/** Force the current thread onto a particular CPU
+
+	@pre	Kernel must not be locked.
+	@pre	Call in a thread context.
+	@pre	Current thread must not be in a group.
+	@pre	Current thread must not hold a fast mutex.
+	@pre	Current thread must have an active CPU freeze.
+	@pre	Current thread must not be an initial thread.
+
+	@param	aCpu The number of the CPU to which this thread should be moved
+*/
+void NKern::JumpTo(TInt aCpu)
+	{
+	// check aCpu is valid
+	NThreadBase* t = NKern::CurrentThread();
+	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T NJumpTo %d", t, aCpu));
+	if (NKern::HeldFastMutex())
+		__crash();
+	t->LAcqSLock();
+	if (t->iParent!=t)
+		__crash();
+	if (!t->iFreezeCpu)
+		__crash();
+	if (t->i_NThread_Initial)
+		__crash();
+	if (TUint(aCpu) >= (TUint)NKern::NumberOfCpus())
+		__crash();
+	TUint8 fc = (TUint8)(aCpu | NSchedulable::EReadyOffset);
+	if (t->iCurrent != fc)
+		{
+		t->iForcedCpu = fc;
+		t->iCpuChange = TRUE;
+		RescheduleNeeded();
+		}
+	t->RelSLockU();		// reschedules and jumps to new CPU
+	}
+
+TBool NSchedulable::ShouldMigrate(TInt aCpu)
+	{
+	// Check if the thread's current CPU is compatible with the new affinity
+	TUint32 active = TheScheduler.iThreadAcceptCpus;
+
+	// If it can't stay where it is, migrate
+	if (!CheckCpuAgainstAffinity(aCpu, iCpuAffinity, active))
+		return TRUE;
+
+	TInt cpu = iTransientCpu ? iTransientCpu : iPreferredCpu;
+
+	// No preferred or transient CPU, so can stay where it is
+	if (!cpu)
+		return FALSE;
+
+	// If thread isn't on preferred CPU but could be, migrate
+	cpu &= EReadyCpuMask;
+	if (cpu!=aCpu && CheckCpuAgainstAffinity(cpu, iCpuAffinity, active))
+		return TRUE;
+	return FALSE;
 	}
 
 
 /******************************************************************************
  * Thread wait state
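
Taken together, SetCpuAffinityT now accepts three special encodings of aAffinity besides a plain pre-processed CPU mask: the all-CPUs value clears any transient CPU, a value carrying KCpuAffinityPref records a preferred (optionally sticky) CPU, and one carrying KCpuAffinityTransient records a transient CPU; ShouldMigrate then lets the transient CPU override the preferred one. A standalone model of that decision (stand-in names; the real CheckCpuAgainstAffinity also consults the active set in iThreadAcceptCpus):

// Model of NSchedulable::ShouldMigrate's decision logic (illustrative).
// aOk(cpu) stands in for CheckCpuAgainstAffinity(cpu, iCpuAffinity, active).
enum { EModelReadyCpuMask = 0x1f };	// stand-in for EReadyCpuMask

bool ModelShouldMigrate(int aCurrentCpu, bool (*aOk)(int), int aTransient, int aPreferred)
	{
	if (!aOk(aCurrentCpu))
		return true;				// current CPU no longer permitted: must move
	int cpu = aTransient ? aTransient : aPreferred;	// transient CPU takes precedence
	if (!cpu)
		return false;				// no target CPU recorded: stay put
	cpu &= EModelReadyCpuMask;		// strip the EReadyOffset tag bit
	return cpu != aCurrentCpu && aOk(cpu);	// move only if the target is usable
	}
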
@@ -1410,11 +1512,11 @@
 		CancelTimerT();
 	if (oldws64 & EWtStWaitActive)
 		{
 		NThreadBase* t = Thread();
 		if (!t->iPauseCount && !t->iSuspended)
-			t->ReadyT(0);
+			t->ReadyT(oldws64 & EWtStObstructed);
 		}
 	return KErrNone;
 	}
 
 TUint32 NThreadWaitState::ReleaseT(TAny*& aWaitObj, TInt aReturnValue)
@@ -1754,10 +1856,28 @@
 	aMutex->Signal();
 	NKern::Unlock();
 	}
 
 
+/** Changes the nominal priority of a thread.
+
+	This function is intended to be used by the EPOC layer and personality layers.
+	Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().
+
+	@param aThread Thread to receive the new priority.
+	@param aPriority New nominal priority for aThread.
+
+	@see Kern::ThreadSetPriority()
+*/
+void NKern::ThreadSetNominalPriority(NThread* aThread, TInt aPriority)
+	{
+	NKern::Lock();
+	aThread->SetNominalPriority(aPriority);
+	NKern::Unlock();
+	}
+
+
 /** Atomically signals the request semaphore of a nanothread and a fast mutex.
 
 	This function is intended to be used by the EPOC layer and personality
 	layers.  Device drivers should use Kern::RequestComplete instead.
 
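
As its doc comment says, the new ThreadSetNominalPriority is a thin kernel-locked wrapper for the EPOC and personality layers; a hypothetical personality-layer call site might look like this (sketch; `PlRenice` is invented for illustration):

// Hypothetical personality-layer helper (illustrative only).
void PlRenice(NThread* aThread, TInt aNewPri)
	{
	// No NKern::Lock() here: ThreadSetNominalPriority takes and releases
	// the kernel lock itself.
	NKern::ThreadSetNominalPriority(aThread, aNewPri);
	}
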
@@ -1945,10 +2065,11 @@
 		{
 		NKern::Unlock();
 		return 1;
 		}
 	pC->iFreezeCpu = 1;
+	__e32_atomic_add_rlx32(&ss.iDeferShutdown, 1);
 	if (pC->iParent != pC)
 		{
 		pC->AcqSLock();
 		++pC->iParent->iFreezeCpu;
 		pC->RelSLock();
@@ -1984,10 +2105,11 @@
 				RescheduleNeeded();
 			pC->RelSLock();
 			}
 		else if (pC->iCpuChange)		// deferred CPU change?
 			RescheduleNeeded();
+		__e32_atomic_add_rlx32(&ss.iDeferShutdown, TUint32(-1));
 		}
 	NKern::Unlock();
 	}
 
 
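
The new per-sub-scheduler iDeferShutdown counter is incremented with a relaxed atomic when the current thread freezes itself to its CPU and decremented (an add of TUint32(-1), i.e. -1 in two's complement) on the unfreeze path, presumably so the power-management code will not retire a core while a frozen thread is pinned to it. A model of the balanced-counter pattern:

// Balanced defer counter (model; std::atomic stands in for the e32 relaxed
// atomics used in the kernel).
#include <atomic>

struct CoreShutdownGateModel
	{
	std::atomic<unsigned> iDeferShutdown;

	CoreShutdownGateModel() : iDeferShutdown(0) {}

	void Freeze()	{ iDeferShutdown.fetch_add(1u, std::memory_order_relaxed); }
	void Unfreeze()	{ iDeferShutdown.fetch_sub(1u, std::memory_order_relaxed); }

	// Relaxed ordering is enough if the shutdown decision itself is taken
	// under a lock that re-checks the counter, as the scheduler appears to do.
	bool MayShutDown() const { return iDeferShutdown.load(std::memory_order_relaxed) == 0; }
	};
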
@@ -1998,13 +2120,13 @@
 	@param	aAffinity The new CPU affinity mask
 	@return The old affinity mask
  */
 EXPORT_C TUint32 NKern::ThreadSetCpuAffinity(NThread* aThread, TUint32 aAffinity)
 	{
-	NKern::Lock();
-	TUint32 r = aThread->SetCpuAffinity(aAffinity);
-	NKern::Unlock();
+	aThread->LAcqSLock();
+	TUint32 r = aThread->SetCpuAffinityT(aAffinity);
+	aThread->RelSLockU();
 	return r;
 	}
 
 
 /** Modify a thread's timeslice
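
The caller now takes the thread's spinlock itself and invokes the T-suffixed worker: by the naming convention visible throughout this file, a trailing T means "call with the relevant lock already held", and the L/U decorations on LAcqSLock/RelSLockU bracket the operation with the kernel lock. Assuming those conventions, the wrappers expand roughly to:

// Assumed expansion of the locking wrappers (sketch based on this file's
// naming conventions, not the authoritative definitions).
void ModelLAcqSLock(NSchedulable* aObj)
	{
	NKern::Lock();		// leading 'L': take the kernel (preemption) lock first
	aObj->AcqSLock();	// then the per-object spinlock
	}

void ModelRelSLockU(NSchedulable* aObj)
	{
	aObj->RelSLock();	// release the per-object spinlock
	NKern::Unlock();	// trailing 'U': drop the kernel lock, allowing a reschedule
	}
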
@@ -2325,11 +2447,19 @@
 		pC->AcqSLock();
 		ss.iReadyListLock.LockOnly();
 		pC->UnReadyT();
 		pC->iParent = pC;
 		g->iCurrent = 0;	// since current thread is no longer in g
-		ss.AddHead(pC);
+		TUint64 now = NKern::Timestamp();
+		g->iLastRunTime.i64 = now;
+		g->iTotalCpuTime.i64 += (now - g->iLastStartTime.i64);
+		if (--g->iActiveState == 0)
+			{
+			// group no longer active
+			g->iTotalActiveTime.i64 += (now - g->iLastActivationTime.i64);
+			}
+		ss.SSAddEntryHead(pC);
 		pC->iReady = TUint8(ss.iCpuNum | NSchedulable::EReadyOffset);
 		pC->iCpuAffinity = g->iCpuAffinity;	// keep same CPU affinity
 		// if we're frozen, the group's freeze count was incremented
 		if (pC->iFreezeCpu)
 			--g->iFreezeCpu;
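
The timestamp bookkeeping added here keeps two accumulators consistent as threads enter and leave a group: iTotalCpuTime collects time since the group last started running, while iTotalActiveTime collects time over whole activation intervals, delimited by iActiveState rising from 0 and falling back to 0 (NKern::JoinGroup below performs the mirror-image updates). The invariant, as a small model:

// Model of interval accounting driven by an activation count (illustrative).
struct ActiveTimeModel
	{
	unsigned long long iTotalActiveTime;
	unsigned long long iLastActivationTime;
	int iActiveState;

	ActiveTimeModel() : iTotalActiveTime(0), iLastActivationTime(0), iActiveState(0) {}

	void Enter(unsigned long long aNow)	// cf. NKern::JoinGroup
		{
		if (++iActiveState == 1)
			iLastActivationTime = aNow;	// first activation opens an interval
		}
	void Leave(unsigned long long aNow)	// cf. NKern::LeaveGroup
		{
		if (--iActiveState == 0)
			iTotalActiveTime += aNow - iLastActivationTime;	// interval closes
		}
	};
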
@@ -2349,11 +2479,11 @@
 				// we were the last thread in the group stopping it from moving
 				// but there may be no other threads left after UnReadyT'ing this one
 				g->iCpuChange = FALSE;
 				if (g->iReady)
 					{
-					ss.Remove(g);
+					ss.SSRemoveEntry(g);
 					g->iReady = 0;
 					make_group_ready = TRUE;
 					}
 				}
 			}
@@ -2391,11 +2521,13 @@
 	__ASSERT_WITH_MESSAGE_DEBUG(!pC->i_NThread_Initial, "Not idle thread", "NKern::JoinGroup");
 	__NK_ASSERT_ALWAYS(pC->iParent==pC && !pC->iFreezeCpu);
 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NJoinGroup %T->%G",pC,aGroup));
 	pC->AcqSLock();
 	aGroup->AcqSLock();
-	TBool migrate = !CheckCpuAgainstAffinity(ss.iCpuNum, aGroup->iCpuAffinity);	// TRUE if thread's current CPU is incompatible with the group's affinity
+
+	// Check if current CPU is compatible with group's affinity
+	TBool migrate = !CheckCpuAgainstAffinity(ss.iCpuNum, aGroup->iCpuAffinity);
 	if (!aGroup->iReady || aGroup->iReady==pC->iReady)
 		{
 		// group not ready or ready on this CPU
 		if (!migrate)
 			{
@@ -2404,21 +2536,23 @@
 			pC->iParent = aGroup;
 			aGroup->iNThreadList.AddHead(pC);
 			if (!aGroup->iReady)
 				{
 				aGroup->iPriority = pC->iPriority;
-				ss.AddHead(aGroup);
+				ss.SSAddEntryHead(aGroup);
 				aGroup->iReady = TUint8(ss.iCpuNum | NSchedulable::EReadyOffset);
 				}
 			else if (pC->iPriority > aGroup->iPriority)
-				{
-				ss.ChangePriority(aGroup, pC->iPriority);
-				}
+				ss.SSChgEntryP(aGroup, pC->iPriority);
 			pC->iReady = NSchedulable::EReadyGroup;
 			aGroup->iCurrent = aGroup->iReady;
 			ss.iReadyListLock.UnlockOnly();
 			++aGroup->iThreadCount;
+			TUint64 now = NKern::Timestamp();
+			aGroup->iLastStartTime.i64 = now;
+			if (++aGroup->iActiveState == 1)
+				aGroup->iLastActivationTime.i64 = now;
 			goto done;
 			}
 		}
 	// this thread needs to migrate to another CPU
 	pC->iNewParent = aGroup;
@@ -2442,4 +2576,4 @@
 	NKern::Unlock();
 	}
 
 
@@ -2446,18 +2580,55 @@
 /******************************************************************************
+ * Iterable Doubly Linked List
+ ******************************************************************************/
+TInt SIterDQIterator::Step(SIterDQLink*& aObj, TInt aMaxSteps)
+	{
+	if (aMaxSteps <= 0)
+		aMaxSteps = KMaxCpus + 3;
+	SIterDQLink* p = Next();
+	SIterDQLink* q = p;
+	__NK_ASSERT_DEBUG(p!=0);
+	for(; p->IsIterator() && --aMaxSteps>0; p=p->Next())
+		{}
+	if (p->IsObject())
+		{
+		// found object
+		Deque();
+		InsertAfter(p);
+		aObj = p;
+		return KErrNone;
+		}
+	if (p->IsAnchor())
+		{
+		// reached end of list
+		if (p != q)
+			{
+			Deque();
+			InsertBefore(p);	// put at the end
+			}
+		aObj = 0;
+		return KErrEof;
+		}
+	// Maximum allowed number of other iterators skipped
+	Deque();
+	InsertAfter(p);
+	aObj = 0;
+	return KErrGeneral;
+	}
+
+
+/******************************************************************************
 * Priority Lists
 ******************************************************************************/
 
 #ifndef __PRI_LIST_MACHINE_CODED__
 /** Returns the priority of the highest priority item present on a priority list.
 
 	@return	The highest priority present or -1 if the list is empty.
  */
 EXPORT_C TInt TPriListBase::HighestPriority()
 	{
-//	TUint64 present = MAKE_TUINT64(iPresent[1], iPresent[0]);
-//	return __e32_find_ms1_64(present);
 	return __e32_find_ms1_64(iPresent64);
 	}
 
 
 /** Finds the highest priority item present on a priority list.
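
SIterDQIterator::Step implements cooperative iteration over a list that may hold several iterators at once: it skips other iterators (up to a budget), repositions this iterator just after whatever it stopped at, and distinguishes three outcomes - KErrNone with an object, KErrEof at the anchor, and KErrGeneral when the step budget ran out while skipping. A typical traversal loop would look like this (sketch; locking and the initial insertion of the iterator into the list are the caller's business, and ProcessEntry is invented):

void ProcessEntry(SIterDQLink* aObj);	// hypothetical per-object handler

void WalkList(SIterDQIterator& aIter)
	{
	SIterDQLink* obj = 0;
	TInt r;
	do	{
		r = aIter.Step(obj, 0);		// 0 selects the default budget (KMaxCpus+3)
		if (r == KErrNone)
			ProcessEntry(obj);
		} while (r == KErrNone);
	// KErrEof: anchor reached, iterator parked at the tail of the list.
	// KErrGeneral: budget exhausted skipping other iterators; retry later.
	}
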
@@ -2562,32 +2733,26 @@
 
 /******************************************************************************
  * Generic IPIs
  ******************************************************************************/
 
-TGenIPIList::TGenIPIList()
-	:	iGenIPILock(TSpinLock::EOrderGenericIPIList)
-	{
-	}
-
-TGenIPIList GenIPIList;
-
 extern "C" {
 extern void send_generic_ipis(TUint32);
 
 void generic_ipi_isr(TSubScheduler* aS)
 	{
+	TScheduler& s = TheScheduler;
 	TGenericIPI* ipi = aS->iNextIPI;
 	if (!ipi)
 		return;
 	TUint32 m = aS->iCpuMask;
-	SDblQueLink* anchor = &GenIPIList.iA;
+	SDblQueLink* anchor = &s.iGenIPIList.iA;
 	while (ipi != anchor)
 		{
 		__e32_atomic_and_acq32(&ipi->iCpusIn, ~m);
 		(*ipi->iFunc)(ipi);
-		TInt irq = GenIPIList.iGenIPILock.LockIrqSave();
+		TInt irq = s.iGenIPILock.LockIrqSave();
 		TGenericIPI* n = (TGenericIPI*)ipi->iNext;
 		ipi->iCpusOut &= ~m;
 		if (ipi->iCpusOut == 0)
 			{
 			ipi->Deque();
@@ -2597,11 +2762,11 @@
 		ipi = n;
 		while (ipi!=anchor && !(ipi->iCpusIn & m))
 			ipi = (TGenericIPI*)ipi->iNext;
 		if (ipi == anchor)
 			aS->iNextIPI = 0;
-		GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
+		s.iGenIPILock.UnlockIrqRestore(irq);
 		}
 	}
 }
 
 void TGenericIPI::Queue(TGenericIPIFn aFunc, TUint32 aCpuMask)
@@ -2609,29 +2774,29 @@
 	__KTRACE_OPT(KSCHED2,DEBUGPRINT("GenIPI F=%08x M=%08x", aFunc, aCpuMask));
 	iFunc = aFunc;
 	TScheduler& s = TheScheduler;
 	TInt i;
 	TUint32 ipis = 0;
-	TInt irq = GenIPIList.iGenIPILock.LockIrqSave();
+	TInt irq = s.iGenIPILock.LockIrqSave();
 	if (aCpuMask & 0x80000000u)
 		{
 		if (aCpuMask==0xffffffffu)
-			aCpuMask = s.iActiveCpus2;
+			aCpuMask = s.iIpiAcceptCpus;
 		else if (aCpuMask==0xfffffffeu)
-			aCpuMask = s.iActiveCpus2 &~ SubScheduler().iCpuMask;
+			aCpuMask = s.iIpiAcceptCpus &~ SubScheduler().iCpuMask;
 		else
 			aCpuMask = 0;
 		}
 	iCpusIn = aCpuMask;
 	iCpusOut = aCpuMask;
 	if (!aCpuMask)
 		{
-		GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
+		s.iGenIPILock.UnlockIrqRestore(irq);
 		iNext = 0;
 		return;
 		}
-	GenIPIList.Add(this);
+	s.iGenIPIList.Add(this);
 	for (i=0; i<s.iNumCpus; ++i)
 		{
 		if (!(aCpuMask & (1<<i)))
 			continue;
 		TSubScheduler& ss = *s.iSub[i];
@@ -2640,11 +2805,11 @@
 			ss.iNextIPI = this;
 			ipis |= (1<<i);
 			}
 		}
 	send_generic_ipis(ipis);
-	GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
+	s.iGenIPILock.UnlockIrqRestore(irq);
 	__KTRACE_OPT(KSCHED2,DEBUGPRINT("GenIPI ipis=%08x", ipis));
 	}
 
 void TGenericIPI::QueueAll(TGenericIPIFn aFunc)
 	{
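
Queue resolves its symbolic masks under the IPI lock: 0xffffffff means every CPU currently accepting IPIs, 0xfffffffe every such CPU except the caller's, and any other value with bit 31 set degenerates to no CPUs; with iIpiAcceptCpus replacing iActiveCpus2, a core stops receiving generic IPIs as soon as it leaves the accept set on its way down. A hypothetical broadcast call (the function runs in ISR context on each target CPU, so it must be ISR-safe):

// Hypothetical use of the symbolic masks (illustrative; DoPerCpuWork is
// invented and must observe ISR restrictions).
void DoPerCpuWork(TGenericIPI* aIpi);

void KickOtherCpus(TGenericIPI& aIpi)
	{
	aIpi.Queue(&DoPerCpuWork, 0xfffffffeu);	// all IPI-accepting CPUs except this one
	aIpi.WaitCompletion();			// spin until every target has run the function
	}
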
@@ -2679,30 +2844,170 @@
 	mb();
 	}
 
 /**	Stop all other CPUs
 
-	Call with kernel locked
+Call with kernel unlocked, returns with kernel locked.
+Returns mask of CPUs halted plus current CPU.
 */
-void TStopIPI::StopCPUs()
+TUint32 TStopIPI::StopCPUs()
 	{
+	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"TStopIPI::StopCPUs()");
+	TScheduler& s = TheScheduler;
 	iFlag = 0;
+	NKern::ThreadEnterCS();
+
+	// Stop any cores powering up or down for now
+	// A core already on the way down will stop just before the transition to SHUTDOWN_FINAL
+	// A core already on the way up will carry on powering up
+	TInt irq = s.iGenIPILock.LockIrqSave();
+	++s.iCCDeferCount;	// stops bits in iIpiAcceptCpus being cleared, but doesn't stop them being set
+						// but iIpiAcceptCpus | s.iCpusComingUp is constant
+	TUint32 act2 = s.iIpiAcceptCpus;		// CPUs still accepting IPIs
+	TUint32 cu = s.iCpusComingUp;			// CPUs powering up
+	s.iGenIPILock.UnlockIrqRestore(irq);
+	TUint32 cores = act2 | cu;
+	if (cu)
+		{
+		// wait for CPUs coming up to start accepting IPIs
+		while (cores & ~s.iIpiAcceptCpus)
+			{
+			__snooze();	// snooze until cores have come up
+			}
+		}
+	NKern::Lock();
 	QueueAllOther(&Isr);	// send IPIs to all other CPUs
 	WaitEntry();			// wait for other CPUs to reach the ISR
+	return cores;
 	}
 
+
+/**	Release the stopped CPUs
+
+Call with kernel locked, returns with kernel unlocked.
+*/
 void TStopIPI::ReleaseCPUs()
 	{
-	iFlag = 1;				// allow other CPUs to proceed
+	__e32_atomic_store_rel32(&iFlag, 1);	// allow other CPUs to proceed
 	WaitCompletion();		// wait for them to finish with this IPI
+	NKern::Unlock();
+	TheScheduler.CCUnDefer();
+	NKern::ThreadLeaveCS();
 	}
 
 void TStopIPI::Isr(TGenericIPI* a)
 	{
 	TStopIPI* s = (TStopIPI*)a;
-	while (!s->iFlag)
+	while (!__e32_atomic_load_acq32(&s->iFlag))
 		{
 		__chill();
 		}
+	__e32_io_completion_barrier();
 	}
 
 
+/******************************************************************************
+ * TCoreCycler - general method to execute something on all active cores
+ ******************************************************************************/
+TCoreCycler::TCoreCycler()
+	{
+	iCores = 0;
+	iG = 0;
+	}
+
+void TCoreCycler::Init()
+	{
+	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"TCoreCycler::Init()");
+	TScheduler& s = TheScheduler;
+	NKern::ThreadEnterCS();
+	iG = NKern::LeaveGroup();
+	NThread* t = NKern::CurrentThread();
+	if (t->iCoreCycling)
+		{
+		__crash();
+		}
+	t->iCoreCycling = TRUE;
+
+	// Stop any cores powering up or down for now
+	// A core already on the way down will stop just before the transition to SHUTDOWN_FINAL
+	// A core already on the way up will carry on powering up
+	TInt irq = s.iGenIPILock.LockIrqSave();
+	++s.iCCDeferCount;	// stops bits in iIpiAcceptCpus being cleared, but doesn't stop them being set
+						// but iIpiAcceptCpus | s.iCpusComingUp is constant
+	TUint32 act2 = s.iIpiAcceptCpus;		// CPUs still accepting IPIs
+	TUint32 cu = s.iCpusComingUp;			// CPUs powering up
+	TUint32 gd = s.iCpusGoingDown;			// CPUs no longer accepting IPIs on the way down
+	s.iGenIPILock.UnlockIrqRestore(irq);
+	if (gd)
+		{
+		// wait for CPUs going down to reach INACTIVE state
+		TUint32 remain = gd;
+		FOREVER
+			{
+			TInt i;
+			for (i=0; i<KMaxCpus; ++i)
+				{
+				if (remain & (1u<<i))
+					{
+					// platform specific function returns TRUE when core has detached from SMP cluster
+					if (s.iSub[i]->Detached())
+						remain &= ~(1u<<i);	// core is now down
+					}
+				}
+			if (!remain)
+				break;		// all done
+			else
+				{
+				__snooze();	// snooze until cores have gone down
+				}
+			}
+		}
+	iCores = act2 | cu;
+	if (cu)
+		{
+		// wait for CPUs coming up to start accepting IPIs
+		while (iCores & ~s.iIpiAcceptCpus)
+			{
+			__snooze();	// snooze until cores have come up
+			}
+		}
+	iFrz = NKern::FreezeCpu();
+	if (iFrz)
+		__crash();	// already frozen so won't be able to migrate :-(
+	iInitialCpu = NKern::CurrentCpu();
+	iCurrentCpu = iInitialCpu;
+	iRemain = iCores;
+	}
+
+TInt TCoreCycler::Next()
+	{
+	NThread* t = NKern::CurrentThread();
+	if (iCores == 0)
+		{
+		Init();
+		return KErrNone;
+		}
+	if (NKern::CurrentCpu() != iCurrentCpu)
+		__crash();
+	iRemain &= ~(1u<<iCurrentCpu);
+	TInt nextCpu = iRemain ? __e32_find_ms1_32(iRemain) : iInitialCpu;
+	if (nextCpu != iCurrentCpu)
+		{
+		NKern::JumpTo(nextCpu);
+		iCurrentCpu = nextCpu;
+		if (NKern::CurrentCpu() != iCurrentCpu)
+			__crash();
+		}
+	if (iRemain)
+		{
+		return KErrNone;
+		}
+	NKern::EndFreezeCpu(iFrz);
+	iCores = 0;
+	TScheduler& s = TheScheduler;
+	s.CCUnDefer();
+	t->iCoreCycling = FALSE;
+	if (iG)
+		NKern::JoinGroup(iG);
+	NKern::ThreadLeaveCS();
+	return KErrEof;
+	}
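
TCoreCycler packages the NKern::JumpTo machinery into a visit-every-core protocol: the first call to Next() runs Init (enter a critical section, leave any thread group, freeze the thread to its CPU, and pin the set of live cores via iCCDeferCount), each subsequent call hops to the next remaining core, and the final call restores everything and returns KErrEof. The intended usage is therefore a simple loop (sketch; the per-core action is invented):

// Sketch of the TCoreCycler protocol. DoOnThisCore is hypothetical and runs
// once on every core that was active when the cycle started.
void DoOnThisCore(TInt aCpu);

void RunOnAllCores()
	{
	TCoreCycler cycler;
	while (cycler.Next() == KErrNone)
		DoOnThisCore(NKern::CurrentCpu());
	// Next() returned KErrEof: the CPU freeze has been released, the original
	// group (if any) rejoined, and the critical section left.
	}
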