diff -r c1f20ce4abcf -r 3e88ff8f41d5 kernel/eka/nkernsmp/x86/ncthrd.cpp
--- a/kernel/eka/nkernsmp/x86/ncthrd.cpp	Tue Aug 31 16:34:26 2010 +0300
+++ b/kernel/eka/nkernsmp/x86/ncthrd.cpp	Wed Sep 01 12:34:56 2010 +0100
@@ -36,7 +36,7 @@
 extern void __ltr(TInt /*aSelector*/);
 extern "C" TUint __tr();
 
-extern void InitTimestamp(TSubScheduler* aSS, SNThreadCreateInfo& aInfo);
+extern void InitAPTimestamp(SNThreadCreateInfo& aInfo);
 
 TInt NThread::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
 	{
@@ -44,7 +44,6 @@
 		return KErrArgument;
 	new (this) NThread;
 	TInt cpu = -1;
-	TSubScheduler* ss = 0;
 	if (aInitial)
 		{
 		cpu = __e32_atomic_add_ord32(&TheScheduler.iNumCpus, 1);
@@ -53,22 +52,20 @@
 		aInfo.iCpuAffinity = cpu;	// OK since we can't migrate yet
 		TUint32 apicid = *(volatile TUint32*)(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID) >> 24;
-		ss = &TheSubSchedulers[cpu];
-		ss->iSSX.iAPICID = apicid << 24;
-		ss->iCurrentThread = this;
-		ss->iDeferShutdown = 0;
-		SubSchedulerLookupTable[apicid] = ss;
-		iRunCount.i64 = UI64LIT(1);
-		iActiveState = 1;
-		__KTRACE_OPT(KBOOT,DEBUGPRINT("Init: cpu=%d APICID=%08x ss=%08x", cpu, apicid, ss));
+		TSubScheduler& ss = TheSubSchedulers[cpu];
+		ss.i_APICID = (TAny*)(apicid<<24);
+		ss.iCurrentThread = this;
+		SubSchedulerLookupTable[apicid] = &ss;
+		ss.iLastTimestamp64 = NKern::Timestamp();
+		iRunCount64 = UI64LIT(1);
+		__KTRACE_OPT(KBOOT,DEBUGPRINT("Init: cpu=%d APICID=%08x ss=%08x", cpu, apicid, &ss));
 		if (cpu)
 			{
 			__ltr(TSS_SELECTOR(cpu));
 			NIrq::HwInit2AP();
-			__e32_atomic_ior_ord32(&TheScheduler.iThreadAcceptCpus, 1<<cpu);
+
+			InitAPTimestamp(aInfo);
 			}
 		}
-	AddToEnumerateList();
-	InitLbInfo();
 #ifdef BTRACE_THREAD_IDENTIFICATION
 	BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadCreate,this);
 #endif
@@ -153,7 +149,7 @@
 	TInt irq = NKern::DisableAllInterrupts();
 	TSubScheduler& ss = SubScheduler();
 	NThreadBase* ct = ss.iCurrentThread;
-	TInt inc = TInt(ss.iSSX.iIrqNestCount);
+	TInt inc = TInt(ss.i_IrqNestCount);
 	TInt cpu = ss.iCpuNum;
 	NKern::RestoreInterrupts(irq);
 	DEBUGPRINT("Thread %T, CPU %d, KLCount=%08x, IrqNest=%d",ct,cpu,ss.iKernLockCount,inc);
@@ -232,7 +228,7 @@
 	if (pC != this)
 		{
 		AcqSLock();
-		if (iWaitState.ThreadIsDead() || i_NThread_Initial)
+		if (iWaitState.ThreadIsDead())
 			{
 			RelSLock();
 			aAvailRegistersMask = 0;
@@ -283,7 +279,7 @@
 	TGetContextIPI& ipi = *(TGetContextIPI*)aPtr;
 	TX86RegSet& a = *ipi.iContext;
 	TSubScheduler& ss = SubScheduler();
-	TUint32* irqstack = (TUint32*)ss.iSSX.iIrqStackTop;
+	TUint32* irqstack = (TUint32*)ss.i_IrqStackTop;
 	SThreadExcStack* txs = (SThreadExcStack*)irqstack[-1];	// first word pushed on IRQ stack points to thread supervisor stack
 	GetContextAfterExc(a, txs, *ipi.iAvailRegsMask, TRUE);
 	}
@@ -394,7 +390,7 @@
 	if (pC != this)
 		{
 		AcqSLock();
-		if (iWaitState.ThreadIsDead() || i_NThread_Initial)
+		if (iWaitState.ThreadIsDead())
 			{
 			RelSLock();
 			aRegMask = 0;
@@ -571,6 +567,31 @@
 	}
 
+/** Return the total CPU time so far used by the specified thread.
+
+	@return The total CPU time in units of 1/NKern::CpuTimeMeasFreq().
+*/
+EXPORT_C TUint64 NKern::ThreadCpuTime(NThread* aThread)
+	{
+	TSubScheduler* ss = 0;
+	NKern::Lock();
+	aThread->AcqSLock();
+	if (aThread->i_NThread_Initial)
+		ss = &TheSubSchedulers[aThread->iLastCpu];
+	else if (aThread->iReady && aThread->iParent->iReady)
+		ss = &TheSubSchedulers[aThread->iParent->iReady & NSchedulable::EReadyCpuMask];
+	if (ss)
+		ss->iReadyListLock.LockOnly();
+	TUint64 t = aThread->iTotalCpuTime64;
+	if (aThread->iCurrent || (aThread->i_NThread_Initial && !ss->iCurrentThread))
+		t += (NKern::Timestamp() - ss->iLastTimestamp64);
+	if (ss)
+		ss->iReadyListLock.UnlockOnly();
+	aThread->RelSLock();
+	NKern::Unlock();
+	return t;
+	}
+
 extern "C" void __fastcall add_dfc(TDfc* aDfc)
 	{
 	aDfc->Add();
 	}
@@ -582,8 +603,6 @@
 	__e32_memory_barrier();
 	if (aCallback->iNext != KUserModeCallbackUnqueued)
 		return KErrInUse;
-	if (aThread->i_NThread_Initial)
-		return KErrArgument;
 	TInt result = KErrDied;
 	NKern::Lock();
 	TUserModeCallback* listHead = aThread->iUserModeCallbacks;
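
The NKern::ThreadCpuTime() export added above returns raw timestamp ticks, not wall-clock units; per its doc comment, a caller scales the result by NKern::CpuTimeMeasFreq() to obtain real time. A minimal sketch of that conversion, using only the two exports named in this patch; the helper name ThreadCpuTimeUs is hypothetical and not part of the change:

	// Sketch only, not part of the patch: convert the tick count from
	// NKern::ThreadCpuTime() (units of 1/NKern::CpuTimeMeasFreq(), per the
	// doc comment above) into microseconds.
	TUint64 ThreadCpuTimeUs(NThread* aThread)
		{
		TUint64 ticks = NKern::ThreadCpuTime(aThread);	// raw CPU time in ticks
		TUint32 freq = NKern::CpuTimeMeasFreq();		// ticks per second
		// Multiply before dividing to preserve precision; can overflow
		// TUint64 only for implausibly large tick counts.
		return (ticks * UI64LIT(1000000)) / freq;
		}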