kernel/eka/nkernsmp/arm/ncthrd.cpp
changeset 90 947f0dc9f7a8
parent 0 a41df078684a
child 177 a232af6b0b1f
comparison: 52:2d65c2f76d7b vs 90:947f0dc9f7a8
@@ -34 +34 @@
 extern void initialiseState(TInt aCpu, TSubScheduler* aSS);
 
 extern "C" void ExcFault(TAny*);
 
 extern TUint32 __mpid();
-extern void InitAPTimestamp(SNThreadCreateInfo& aInfo);
+extern void InitTimestamp(TSubScheduler* aSS, SNThreadCreateInfo& aInfo);
 
 TInt NThread::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
 	{
 	// Assert ParameterBlockSize is not negative and is a multiple of 8 bytes
 	__NK_ASSERT_ALWAYS((aInfo.iParameterBlockSize&0x80000007)==0);
 	__NK_ASSERT_ALWAYS(aInfo.iStackBase && aInfo.iStackSize>=aInfo.iParameterBlockSize+KNThreadMinStackSize);
 	TInt cpu = -1;
+	TSubScheduler* ss = 0;
 	new (this) NThread;
 	if (aInitial)
 		{
 		cpu = __e32_atomic_add_ord32(&TheScheduler.iNumCpus, 1);
 		aInfo.iCpuAffinity = cpu;
 		// OK since we can't migrate yet
-		TSubScheduler& ss = TheSubSchedulers[cpu];
-		ss.iCurrentThread = this;
-		iRunCount64 = UI64LIT(1);
-		__KTRACE_OPT(KBOOT,DEBUGPRINT("Init: cpu=%d ss=%08x", cpu, &ss));
+		ss = &TheSubSchedulers[cpu];
+		ss->iCurrentThread = this;
+		ss->iDeferShutdown = 0;
+		iRunCount.i64 = UI64LIT(1);
+		iActiveState = 1;
+		__KTRACE_OPT(KBOOT,DEBUGPRINT("Init: cpu=%d ss=%08x", cpu, ss));
 		if (cpu)
 			{
-			initialiseState(cpu,&ss);
+			initialiseState(cpu,ss);
 
 			ArmLocalTimer& T = LOCAL_TIMER;
 			T.iWatchdogDisable = E_ArmTmrWDD_1;
 			T.iWatchdogDisable = E_ArmTmrWDD_2;
 			T.iTimerCtrl = 0;
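Note on the boot path above: each initial thread claims its CPU slot by atomically incrementing TheScheduler.iNumCpus, and __e32_atomic_add_ord32 returns the value before the addition, so the boot processor gets 0 and each application processor gets a unique, dense index with no locking. A minimal sketch of the same claim pattern, written against std::atomic rather than the nanokernel atomics (gNumCpus and ClaimCpuIndex are illustrative names, not kernel symbols):

#include <atomic>

static std::atomic<unsigned> gNumCpus{0};   // stands in for TheScheduler.iNumCpus

// fetch_add returns the value before the addition, so concurrent callers
// receive 0, 1, 2, ... exactly once each.
unsigned ClaimCpuIndex()
	{
	return gNumCpus.fetch_add(1, std::memory_order_acq_rel);
	}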
@@ -67 +70 @@
 			T.iWatchdogIntStatus = E_ArmTmrIntStatus_Event;
 
 			NIrq::HwInit2AP();
 			T.iTimerCtrl = E_ArmTmrCtrl_IntEn | E_ArmTmrCtrl_Reload | E_ArmTmrCtrl_Enable;
 
-			__e32_atomic_ior_ord32(&TheScheduler.iActiveCpus1, 1<<cpu);
-			__e32_atomic_ior_ord32(&TheScheduler.iActiveCpus2, 1<<cpu);
+			__e32_atomic_ior_ord32(&TheScheduler.iThreadAcceptCpus, 1<<cpu);
+			__e32_atomic_ior_ord32(&TheScheduler.iIpiAcceptCpus, 1<<cpu);
 			__e32_atomic_ior_ord32(&TheScheduler.iCpusNotIdle, 1<<cpu);
+			__e32_atomic_add_ord32(&TheScheduler.iCCRequestLevel, 1);
 			__KTRACE_OPT(KBOOT,DEBUGPRINT("AP MPID=%08x",__mpid()));
 			}
 		else
 			{
 			Arm::DefaultDomainAccess = Arm::Dacr();
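Once its local timer and interrupt controller are up, each application processor publishes itself to the scheduler by OR-ing its bit (1<<cpu) into the per-scheduler CPU masks, using __e32_atomic_ior_ord32 so concurrent boots cannot lose updates. A hedged sketch of the same publication idiom over std::atomic (SCpuMasks and PublishCpu are illustrative names, not the real TScheduler layout):

#include <atomic>
#include <cstdint>

struct SCpuMasks                          // illustrative stand-in for the scheduler's masks
	{
	std::atomic<std::uint32_t> iThreadAcceptCpus{0};
	std::atomic<std::uint32_t> iIpiAcceptCpus{0};
	std::atomic<std::uint32_t> iCpusNotIdle{0};
	};

// Mark CPU aCpu as accepting threads and IPIs, and as currently not idle.
void PublishCpu(SCpuMasks& aMasks, int aCpu)
	{
	const std::uint32_t bit = std::uint32_t(1) << aCpu;
	aMasks.iThreadAcceptCpus.fetch_or(bit, std::memory_order_acq_rel);
	aMasks.iIpiAcceptCpus.fetch_or(bit, std::memory_order_acq_rel);
	aMasks.iCpusNotIdle.fetch_or(bit, std::memory_order_acq_rel);
	}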
@@ -130 +134 @@
 
 		// start local timer
 		ArmLocalTimer& T = LOCAL_TIMER;
 		T.iTimerCtrl = E_ArmTmrCtrl_IntEn | E_ArmTmrCtrl_Reload | E_ArmTmrCtrl_Enable;
 
-		// synchronize AP's timestamp with BP's
-		if (cpu>0)
-			InitAPTimestamp(aInfo);
+		// Initialise timestamp
+		InitTimestamp(ss, aInfo);
 		}
+	AddToEnumerateList();
+	InitLbInfo();
#ifdef BTRACE_THREAD_IDENTIFICATION
 	BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadCreate,this);
#endif
 	return KErrNone;
 	}
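For reference, a hedged usage sketch of the path above. Only the SNThreadCreateInfo fields this file actually checks or sets (iStackBase, iStackSize, iParameterBlockSize, iCpuAffinity) appear here; CreateBootThread is an illustrative helper, not a kernel function, and a real caller fills in considerably more state:

// Illustrative only: create a per-CPU initial thread, assuming the nkern
// headers are in scope.
TInt CreateBootThread(NThread& aThread, TAny* aStackBase, TInt aStackSize)
	{
	SNThreadCreateInfo info;
	memclr(&info, sizeof(info));          // zero everything not set explicitly below
	info.iStackBase = aStackBase;
	info.iStackSize = aStackSize;         // must be >= iParameterBlockSize + KNThreadMinStackSize
	info.iParameterBlockSize = 0;         // must be a non-negative multiple of 8
	return aThread.Create(info, ETrue);   // aInitial=ETrue takes the boot path above
	}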
@@ -182 +187 @@
 	DEBUGPRINT("R13Svc=%08x R14Svc=%08x SpsrSvc=%08x",a.iR13Svc,a.iR14Svc,a.iSpsrSvc);
 
 	TInt irq = NKern::DisableAllInterrupts();
 	TSubScheduler& ss = SubScheduler();
 	NThreadBase* ct = ss.iCurrentThread;
-	TInt inc = TInt(ss.i_IrqNestCount);
+	TInt inc = TInt(ss.iSSX.iIrqNestCount);
 	TInt cpu = ss.iCpuNum;
 	TInt klc = ss.iKernLockCount;
 	NKern::RestoreInterrupts(irq);
 	DEBUGPRINT("Thread %T, CPU %d, KLCount=%d, IrqNest=%d", ct, cpu, klc, inc);
 	}
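The snapshot above brackets the per-CPU reads with NKern::DisableAllInterrupts()/NKern::RestoreInterrupts() so the current sub-scheduler cannot change underneath it. A small hedged sketch of the same bracket wrapped in an RAII guard (TIrqBracket is an illustrative class, not part of the nanokernel):

// Disable all interrupts for the lifetime of the object and restore the
// previous level on destruction, so an early return cannot leak the state.
class TIrqBracket
	{
public:
	TIrqBracket() : iLevel(NKern::DisableAllInterrupts()) {}
	~TIrqBracket() { NKern::RestoreInterrupts(iLevel); }
private:
	TInt iLevel;
	};

// Usage sketch:
//	{
//	TIrqBracket guard;
//	TSubScheduler& ss = SubScheduler();
//	// ... read per-CPU state ...
//	}	// interrupts restored here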
@@ -650 +655 @@
 	NThread* pC = NCurrentThreadL();
 	TSubScheduler* ss = 0;
 	if (pC != this)
 		{
 		AcqSLock();
-		if (iWaitState.ThreadIsDead())
+		if (iWaitState.ThreadIsDead() || i_NThread_Initial)
 			{
 			RelSLock();
 			aAvailRegistersMask = 0;
 			return;
 			}
@@ -838 +843 @@
 	NThread* pC = NCurrentThreadL();
 	TSubScheduler* ss = 0;
 	if (pC != this)
 		{
 		AcqSLock();
-		if (iWaitState.ThreadIsDead())
+		if (iWaitState.ThreadIsDead() || i_NThread_Initial)
 			{
 			RelSLock();
 			aRegMask = 0;
 			return;
 			}
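Both hunks above extend the same early-out: after taking the target thread's spin lock, the register readers now bail not only for dead threads but also for initial (per-CPU) threads, reporting an empty register mask. A hedged sketch of that acquire/check/release shape (GetRegsOrBail and ShouldSkip are illustrative stand-ins, not kernel functions):

TBool ShouldSkip(NThread* aT);           // illustrative: e.g. dead or initial thread

// Take the lock, give up early when the thread's context cannot be
// inspected, and report that no registers are available.
void GetRegsOrBail(NThread* aT, TUint32& aMask)
	{
	aT->AcqSLock();
	if (ShouldSkip(aT))
		{
		aT->RelSLock();
		aMask = 0;                       // caller sees "no registers available"
		return;
		}
	// ... read the register state ...
	aT->RelSLock();
	}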
@@ -1055 +1060 @@
 		return 1;
 		}
 	return 0;
 	}
 
-/** Return the total CPU time so far used by the specified thread.
-
-	@return The total CPU time in units of 1/NKern::CpuTimeMeasFreq().
-*/
-EXPORT_C TUint64 NKern::ThreadCpuTime(NThread* aThread)
-	{
-	TSubScheduler* ss = 0;
-	NKern::Lock();
-	aThread->AcqSLock();
-	if (aThread->i_NThread_Initial)
-		ss = &TheSubSchedulers[aThread->iLastCpu];
-	else if (aThread->iReady && aThread->iParent->iReady)
-		ss = &TheSubSchedulers[aThread->iParent->iReady & NSchedulable::EReadyCpuMask];
-	if (ss)
-		ss->iReadyListLock.LockOnly();
-	TUint64 t = aThread->iTotalCpuTime64;
-	if (aThread->iCurrent || (aThread->i_NThread_Initial && !ss->iCurrentThread))
-		t += (NKern::Timestamp() - ss->iLastTimestamp64);
-	if (ss)
-		ss->iReadyListLock.UnlockOnly();
-	aThread->RelSLock();
-	NKern::Unlock();
-	return t;
-	}
 
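The deleted NKern::ThreadCpuTime shows the accounting rule this file used: the result is the thread's accumulated iTotalCpuTime64 plus, when the thread is actually running, the live slice measured from its sub-scheduler's iLastTimestamp64. A simplified restatement of just that arithmetic, keeping the pre-change field names and deliberately eliding the locking (NKern::Lock, AcqSLock, iReadyListLock); CpuTimeSnapshot is an illustrative helper, not a kernel function:

// t = time already accumulated + (now - start of the current slice),
// where the second term applies only while the thread is executing.
TUint64 CpuTimeSnapshot(const NThread& aThread, const TSubScheduler& aSS)
	{
	TUint64 t = aThread.iTotalCpuTime64;
	if (aThread.iCurrent)
		t += NKern::Timestamp() - aSS.iLastTimestamp64;
	return t;
	}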
@@ -1085 +1066 @@
 TInt NKern::QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback)
 	{
 	__e32_memory_barrier();
 	if (aCallback->iNext != KUserModeCallbackUnqueued)
 		return KErrInUse;
+	if (aThread->i_NThread_Initial)
+		return KErrArgument;
 	TInt result = KErrDied;
 	NKern::Lock();
 	TUserModeCallback* listHead = aThread->iUserModeCallbacks;
 	do	{
 		if (TLinAddr(listHead) & 3)
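Two details of QueueUserModeCallback are worth noting: a callback carries its own queued/unqueued state in iNext (the reserved KUserModeCallbackUnqueued value means "not on any list", so double-queueing is caught up front with KErrInUse), and the new check rejects initial threads with KErrArgument, presumably because initial (per-CPU) threads never run user-side code that could consume the callback. A hedged sketch of the sentinel idiom alone (TNode, KUnqueued and Enqueue are illustrative, not kernel symbols):

// A node that is not on any list holds a reserved sentinel in its link
// field; queueing checks the sentinel instead of keeping a separate flag.
struct TNode
	{
	TNode* iNext;
	};
TNode* const KUnqueued = (TNode*)1;      // illustrative sentinel value

TInt Enqueue(TNode*& aHead, TNode* aNode)
	{
	if (aNode->iNext != KUnqueued)
		return KErrInUse;                // already queued somewhere
	aNode->iNext = aHead;
	aHead = aNode;
	return KErrNone;
	}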