kernel/eka/kernel/sutils.cpp
changeset 90 947f0dc9f7a8
parent 39 5d2844f35677
child 134 95847726fe57
--- a/kernel/eka/kernel/sutils.cpp	Tue Feb 02 01:24:03 2010 +0200
+++ b/kernel/eka/kernel/sutils.cpp	Fri Apr 16 16:24:37 2010 +0300
@@ -2538,8 +2538,9 @@
 	return KErrNone;
 	}
 
-TInt K::KernelHal(TInt aFunction, TAny* a1, TAny* /*a2*/)
+TInt K::KernelHal(TInt aFunction, TAny* a1, TAny* a2)
 	{
+	(void)a2;
 	TInt r=KErrNone;
 	switch (aFunction)
 		{
@@ -2682,15 +2683,23 @@
 		case EKernelHalLockThreadToCpu:
 			{
 #ifdef __SMP__
+			r = KErrArgument;
 			TUint32 cpuId = (TUint32)a1;
-			if (cpuId < (TUint32)NKern::NumberOfCpus())
+			TUint32 ncpus = NKern::NumberOfCpus();
+			if (cpuId < ncpus)
 				{
 				NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), cpuId);
 				r = KErrNone;
 				}
-			else
+			else if (cpuId & NTHREADBASE_CPU_AFFINITY_MASK)
 				{
-				r = KErrArgument;
+				TUint32 mask = cpuId & ~NTHREADBASE_CPU_AFFINITY_MASK;
+				TUint32 amask = ~((~0u)<<ncpus);
+				if (cpuId==KCpuAffinityAny || ((mask & amask) && (mask &~ amask)==0))
+					{
+					NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), cpuId);
+					r = KErrNone;
+					}
 				}
 #else
 			r = KErrNone;
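For reference, a minimal standalone sketch of the validation rule the extended EKernelHalLockThreadToCpu branch applies: a plain CPU number must be below the CPU count, while an affinity-mask argument must select at least one existing CPU and none that do not exist. This is an illustration only, not kernel code; the flag and "any" constants are placeholders standing in for NTHREADBASE_CPU_AFFINITY_MASK and KCpuAffinityAny.

// Illustration only: mirrors the affinity-argument check added above, with
// placeholder values for NTHREADBASE_CPU_AFFINITY_MASK and KCpuAffinityAny.
#include <stdint.h>
#include <stdio.h>

static const uint32_t KMaskFlagBit = 0x80000000u;	// placeholder for NTHREADBASE_CPU_AFFINITY_MASK
static const uint32_t KAnyCpu      = 0xFFFFFFFFu;	// placeholder for KCpuAffinityAny

static bool AffinityArgValid(uint32_t aAffinity, uint32_t aNumCpus)
	{
	if (aAffinity < aNumCpus)
		return true;				// a plain CPU number in range
	if (!(aAffinity & KMaskFlagBit))
		return false;				// a plain CPU number out of range
	if (aAffinity == KAnyCpu)
		return true;				// "any CPU" is always accepted
	uint32_t mask  = aAffinity & ~KMaskFlagBit;	// requested CPU set
	uint32_t amask = ~((~0u) << aNumCpus);		// bits for CPUs that actually exist
	// at least one requested CPU must exist and none may be out of range
	return (mask & amask) != 0 && (mask & ~amask) == 0;
	}

int main()
	{
	printf("%d\n", AffinityArgValid(KMaskFlagBit | 0x3u, 2));	// 1: CPUs 0 and 1 on a dual core
	printf("%d\n", AffinityArgValid(KMaskFlagBit | 0x4u, 2));	// 0: CPU 2 does not exist
	printf("%d\n", AffinityArgValid(1u, 2));			// 1: lock to CPU 1
	return 0;
	}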
@@ -2702,7 +2711,57 @@
 			// return bottom 31 bits of config flags so as not to signal an error
 			r=K::KernelConfigFlags() & 0x7fffffff;
 			break;
-	
+
+#ifdef __SMP__
+		case EKernelHalCpuStates:
+			{
+			SCpuStates states;
+			memclr(&states, sizeof(states));
+
+			TScheduler& s = TheScheduler;
+			TInt irq = s.iGenIPILock.LockIrqSave();
+			states.iTA = s.iThreadAcceptCpus;
+			states.iIA = s.iIpiAcceptCpus;
+			states.iCU = s.iCpusComingUp;
+			states.iGD = s.iCpusGoingDown;
+			states.iDC = s.iCCDeferCount;
+			states.iSC = s.iCCSyncCpus;
+			states.iRC = s.iCCReactivateCpus;
+			states.iCCS = s.iCCState;
+			states.iPO = TUint8(s.iPoweringOff ? (s.iPoweringOff->iCpuNum|0x80) : 0);
+			states.iPODC = s.iDetachCount;
+			TInt i;
+			TInt nc = NKern::NumberOfCpus();
+			for (i=0; i<nc; ++i)
+				{
+				TSubScheduler& ss = TheSubSchedulers[i];
+				states.iDS[i] = ss.iDeferShutdown;
+#ifdef __CPU_ARM
+				volatile TUint32* p = (volatile TUint32*)ss.iUncached;
+				states.iUDC[i] = p[0];
+				states.iUAC[i] = p[1];
+#endif
+				}
+			s.iGenIPILock.UnlockIrqRestore(irq);
+
+			kumemput32(a1, &states, sizeof(states));
+			r = KErrNone;
+			break;
+			}
+
+		case EKernelHalSetNumberOfCpus:
+			{
+			TInt n = (TInt)a1;
+			if (n<=0 || n>NKern::NumberOfCpus())
+				r = KErrArgument;
+			else
+				{
+				NKern::SetNumberOfActiveCpus(n);
+				r = KErrNone;
+				}
+			break;
+			}
+#endif
 		default:
 			r=KErrNotSupported;
 			break;
@@ -2967,12 +3026,17 @@
 		else if (!nt || nt->iCsCount==0)
 			m &= ~MASK_NO_CRITICAL;
 		}
-	if (m & MASK_CRITICAL)
+	if (m & (MASK_CRITICAL|MASK_NO_KILL_OR_SUSPEND))
 		{
 		if (t && (t->iThreadType!=EThreadUser || nt->iCsCount>0))
-			m &= ~MASK_CRITICAL;
+			m &= ~(MASK_CRITICAL|MASK_NO_KILL_OR_SUSPEND);
 		else if (!nt || nt->iCsCount>0)
-			m &= ~MASK_CRITICAL;
+			m &= ~(MASK_CRITICAL|MASK_NO_KILL_OR_SUSPEND);
+		}
+	if (m & MASK_NO_KILL_OR_SUSPEND)
+		{
+		if (!nt || NKern::KernelLocked() || NKern::HeldFastMutex())
+			m &= ~MASK_NO_KILL_OR_SUSPEND;
 		}
 	if (m & MASK_KERNEL_LOCKED)
 		{
@@ -3074,6 +3138,8 @@
 		Kern::Printf("Assertion failed");
 	if (m & MASK_NO_RESCHED)
 		Kern::Printf("Assertion failed: Don't call from thread with kernel unlocked");
+	if (m & MASK_NO_KILL_OR_SUSPEND)
+		Kern::Printf("Assertion failed: Must not be suspended or killed here");
 
 #ifdef __KERNEL_APIS_CONTEXT_CHECKS_FAULT__
 	if (aFunction)
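The new MASK_NO_KILL_OR_SUSPEND bit lets kernel-side code assert that the calling thread cannot be killed or suspended at that point: it must be a non-user thread or be inside a critical section, with the kernel unlocked and no fast mutex held. A hedged sketch of how an API might state the precondition, assuming the usual CHECK_PRECONDITIONS wrapper around the checking code patched above; the function name is hypothetical.

// Hypothetical example only: states the new precondition through the existing
// CHECK_PRECONDITIONS macro that feeds the checking code patched above.
void ExampleMustCompleteOperation()
	{
	CHECK_PRECONDITIONS(MASK_NO_KILL_OR_SUSPEND, "ExampleMustCompleteOperation");
	// ... work that must not be interrupted by a kill or suspend ...
	}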