--- a/kernel/eka/include/nkernsmp/nk_priv.h Tue Aug 31 16:34:26 2010 +0300
+++ b/kernel/eka/include/nkernsmp/nk_priv.h Wed Sep 01 12:34:56 2010 +0100
@@ -27,19 +27,6 @@
class Monitor;
-const TInt KNumPriClasses = 4;
-extern const TUint8 KClassFromPriority[KNumPriorities];
-
-#ifndef __LOAD_BALANCE_INFO_DEFINED__
-/**
-@internalComponent
-*/
-struct SLbInfo
- {
- TUint64 i__Dummy;
- };
-#endif
-
/********************************************
* Schedulable = thread or thread group
********************************************/
@@ -54,20 +41,13 @@
class NSchedulable : public TPriListLink
{
public:
- /**
- @internalComponent
- */
enum
{
EReadyGroup=1,
- EReadyCpuMask=0x1f,
- EReadyCpuSticky=0x40,
+ EReadyCpuMask=0x7f,
EReadyOffset=0x80,
};
- /**
- @internalComponent
- */
enum NReadyFlags
{
ENewTimeslice=1,
@@ -75,9 +55,6 @@
EUnPause=4,
};
- /**
- @internalComponent
- */
enum NEventState
{
EEventCountShift=16u,
@@ -90,96 +67,32 @@
EDeferredReady=0x4000u,
EEventParent=0x8000u,
};
-
- /**
- @internalComponent
- */
- enum NLbState
- {
- ELbState_Inactive = 0x00u, // not currently involved in load balancing
- ELbState_Global = 0x01u, // flag indicating this is on global load balance list
- ELbState_Temp = 0x02u, // flag indicating this is on a temporary load balance list
- ELbState_CpuMask = 0x1Fu, // mask of bits indicating CPU if on per-CPU list
- ELbState_PerCpu = 0x20u, // flag indicating this is on per-CPU load balance list
- ELbState_ExtraRef = 0x40u, // flag indicating extra reference has been taken after thread/group died
- ELbState_Generation = 0x80u, // 1 bit generation number
- };
-
- /**
- @internalComponent
- */
- enum NCpuStatsSelect
- {
- E_RunTime=0x01u,
- E_RunTimeDelta=0x02u,
- E_ActiveTime=0x04u,
- E_ActiveTimeDelta=0x08u,
- E_LastRunTime=0x10u,
- E_LastActiveTime=0x20u,
-
- E_AllStats = 0x3fu
- };
-
- /**
- @internalComponent
- */
- struct SCpuStats
- {
- TUint64 iRunTime; // total run time
- TUint64 iRunTimeDelta; // run time since we last asked
- TUint64 iActiveTime; // total active time
- TUint64 iActiveTimeDelta; // active time since we last asked
- TUint64 iLastRunTime; // how long ago this last ran
- TUint64 iLastActiveTime; // how long ago this was last active
- };
public:
- NSchedulable(); /**< @internalComponent */
- void AcqSLock(); /**< @internalComponent */
- void RelSLock(); /**< @internalComponent */
- void LAcqSLock(); /**< @internalComponent */
- void RelSLockU(); /**< @internalComponent */
- void ReadyT(TUint aMode); /**< @internalComponent */ // make ready, assumes lock held
- TInt BeginTiedEvent(); /**< @internalComponent */
- void EndTiedEvent(); /**< @internalComponent */
- TInt AddTiedEvent(NEventHandler* aEvent); /**< @internalComponent */
- TBool TiedEventReadyInterlock(TInt aCpu); /**< @internalComponent */
- void UnPauseT(); /**< @internalComponent */ // decrement pause count and make ready if necessary
- static void DeferredReadyIDfcFn(TAny*); /**< @internalComponent */
- void DetachTiedEvents(); /**< @internalComponent */
- TBool TakeRef(); /**< @internalComponent */
- TBool DropRef(); /**< @internalComponent */
- void LbUnlink(); /**< @internalComponent */
- void LbTransfer(SDblQue& aDestQ); /**< @internalComponent */
- void RemoveFromEnumerateList(); /**< @internalComponent */
- void GetCpuStats(TUint aMask, SCpuStats& aOut); /**< @internalComponent */
- void GetCpuStatsT(TUint aMask, SCpuStats& aOut); /**< @internalComponent */
- void GetLbStats(TUint64 aTime); /**< @internalComponent */
- void LbDone(TUint aFlags); /**< @internalComponent */
- TUint32 SetCpuAffinityT(TUint32 aAffinity); /**< @internalComponent */
- TBool ShouldMigrate(TInt aCpu); /**< @internalComponent */
- void InitLbInfo(); /**< @internalComponent */
- void NominalPriorityChanged(); /**< @internalComponent */
- void AddToEnumerateList(); /**< @internalComponent */
- void SetEventCpu(); /**< @internalComponent */
+ NSchedulable();
+ void AcqSLock();
+ void RelSLock();
+ void LAcqSLock();
+ void RelSLockU();
+ void ReadyT(TUint aMode); // make ready, assumes lock held
+ TInt BeginTiedEvent();
+ void EndTiedEvent();
+ TInt AddTiedEvent(NEventHandler* aEvent);
+ TBool TiedEventReadyInterlock(TInt aCpu);
+ void UnPauseT(); // decrement pause count and make ready if necessary
+ static void DeferredReadyIDfcFn(TAny*);
+ void DetachTiedEvents();
public:
- static TUint32 PreprocessCpuAffinity(TUint32 aAffinity); /**< @internalComponent */
- inline TBool IsGroup() {return !iParent;} /**< @internalComponent */
- inline TBool IsLoneThread() {return iParent==this;} /**< @internalComponent */
- inline TBool IsGroupThread() {return iParent && iParent!=this;} /**< @internalComponent */
+ inline TBool IsGroup() {return !iParent;}
+ inline TBool IsLoneThread() {return iParent==this;}
+ inline TBool IsGroupThread() {return iParent && iParent!=this;}
public:
// TUint8 iReady; /**< @internalComponent */ // flag indicating thread on ready list = cpu number | EReadyOffset
// TUint8 iCurrent; /**< @internalComponent */ // flag indicating thread is running
// TUint8 iLastCpu; /**< @internalComponent */ // CPU on which this thread last ran
TUint8 iPauseCount; /**< @internalComponent */ // count of externally requested pauses extending a voluntary wait
TUint8 iSuspended; /**< @internalComponent */ // flag indicating active external suspend (Not used for groups)
- TUint8 iACount; /**< @internalComponent */ // access count
- TUint8 iPreferredCpu; /**< @internalComponent */
-
- TInt iActiveState; /**< @internalComponent */
- TUint8 i_NSchedulable_Spare2; /**< @internalComponent */
- TUint8 iForcedCpu; /**< @internalComponent */
- TUint8 iTransientCpu; /**< @internalComponent */
- TUint8 iLbState; /**< @internalComponent */
+ TUint8 iNSchedulableSpare1; /**< @internalComponent */
+ TUint8 iNSchedulableSpare2; /**< @internalComponent */
TUint8 iCpuChange; /**< @internalComponent */ // flag showing CPU migration outstanding
TUint8 iStopping; /**< @internalComponent */ // thread is exiting, thread group is being destroyed
@@ -196,31 +109,21 @@
TUint32 i_IDfcMem[sizeof(TDfc)/sizeof(TUint32)]; /**< @internalComponent */ // IDFC used to make thread ready after last tied event completes
// TDfc iDeferredReadyIDfc; /**< @internalComponent */ // IDFC used to make thread ready after last tied event completes
- union {
- TUint64HL iRunCount; /**< @internalComponent */ // number of times this thread has run
- TUint64HL iLastStartTime; /**< @internalComponent */ // last start time for groups
+ union
+ {
+ TUint64 iRunCount64;
+ TUint32 iRunCount32[2];
};
- TUint64HL iLastRunTime; /**< @internalComponent */ // time when this thread last ran
- TUint64HL iTotalCpuTime; /**< @internalComponent */ // total CPU time used by this thread
- TUint64HL iLastActivationTime; /**< @internalComponent */ // time when this thread last became active
- TUint64HL iTotalActiveTime; /**< @internalComponent */ // total time this thread has been active
- TUint64HL iSavedCpuTime; /**< @internalComponent */ // Total CPU time used at last check
- TUint64HL iSavedActiveTime; /**< @internalComponent */ // Total active time at last check
- SDblQueLink iLbLink; /**< @internalComponent */ // Link into queue of tasks requiring load balancing
- SIterDQLink iEnumerateLink; /**< @internalComponent */
-
- enum {EMaxLbInfoSize = 48}; /**< @internalComponent */
- union {
- TUint64 i__Dummy[EMaxLbInfoSize/sizeof(TUint64)]; /**< @internalComponent */
- SLbInfo iLbInfo; /**< @internalComponent */
+ union
+ {
+ TUint64 iTotalCpuTime64; /**< @internalComponent */ // total time spent running, in hi-res timer ticks
+ TUint32 iTotalCpuTime32[2]; /**< @internalComponent */ // total time spent running, in hi-res timer ticks
};
};
__ASSERT_COMPILE(!(_FOFF(NSchedulable,iSSpinLock)&7));
-__ASSERT_COMPILE(!(_FOFF(NSchedulable,iRunCount)&7));
-__ASSERT_COMPILE(!(_FOFF(NSchedulable,iTotalCpuTime)&7));
-__ASSERT_COMPILE(!(_FOFF(NSchedulable,iLbInfo)&7));
-__ASSERT_COMPILE(sizeof(SLbInfo) <= NSchedulable::EMaxLbInfoSize);
+__ASSERT_COMPILE(!(_FOFF(NSchedulable,iRunCount64)&7));
+__ASSERT_COMPILE(!(_FOFF(NSchedulable,iTotalCpuTime64)&7));
__ASSERT_COMPILE(!(sizeof(NSchedulable)&7));
@@ -358,34 +261,34 @@
};
public:
NThreadBase();
- TInt Create(SNThreadCreateInfo& anInfo, TBool aInitial); /**< @internalComponent */
- void UnReadyT(); /**< @internalComponent */
- TBool SuspendOrKill(TInt aCount); /**< @internalComponent */
- TBool DoSuspendOrKillT(TInt aCount, TSubScheduler* aS); /**< @internalComponent */
- TBool CancelTimerT(); /**< @internalComponent */
- void DoReleaseT(TInt aReturnCode, TUint aMode); /**< @internalComponent */
- TBool CheckFastMutexDefer(); /**< @internalComponent */
- void DoCsFunctionT(); /**< @internalComponent */
- TBool Resume(TBool aForce); /**< @internalComponent */
- IMPORT_C TBool Suspend(TInt aCount); /**< @internalComponent */
- IMPORT_C TBool Resume(); /**< @internalComponent */
- IMPORT_C TBool ForceResume(); /**< @internalComponent */
+ TInt Create(SNThreadCreateInfo& anInfo, TBool aInitial);
+ void UnReadyT();
+ TBool SuspendOrKill(TInt aCount);
+ TBool DoSuspendOrKillT(TInt aCount, TSubScheduler* aS);
+ TBool CancelTimerT();
+ void DoReleaseT(TInt aReturnCode, TUint aMode);
+ TBool CheckFastMutexDefer();
+ void DoCsFunctionT();
+ TBool Resume(TBool aForce);
+ IMPORT_C TBool Suspend(TInt aCount); /**< @internalComponent */
+ IMPORT_C TBool Resume(); /**< @internalComponent */
+ IMPORT_C TBool ForceResume(); /**< @internalComponent */
IMPORT_C void Release(TInt aReturnCode, TUint aMode); /**< @internalComponent */
- IMPORT_C void RequestSignal(); /**< @internalComponent */
- IMPORT_C void SetPriority(TInt aPriority); /**< @internalComponent */
- void SetNominalPriority(TInt aPriority); /**< @internalComponent */
- void SetMutexPriority(NFastMutex* aMutex); /**< @internalComponent */
- void LoseInheritedPriorityT(); /**< @internalComponent */
- void ChangeReadyThreadPriority(); /**< @internalComponent */
- TBool TiedEventLeaveInterlock(); /**< @internalComponent */
- TBool TiedEventJoinInterlock(); /**< @internalComponent */
- IMPORT_C void Kill(); /**< @internalComponent */
- void Exit(); /**< @internalComponent */
+ IMPORT_C void RequestSignal(); /**< @internalComponent */
+ IMPORT_C void SetPriority(TInt aPriority); /**< @internalComponent */
+ void SetMutexPriority(NFastMutex* aMutex);
+ void LoseInheritedPriorityT();
+ void ChangeReadyThreadPriority();
+ TUint32 SetCpuAffinity(TUint32 aAffinity);
+ TBool TiedEventLeaveInterlock();
+ TBool TiedEventJoinInterlock();
+ IMPORT_C void Kill(); /**< @internalComponent */
+ void Exit();
// hooks for platform-specific code
- void OnKill(); /**< @internalComponent */
- void OnExit(); /**< @internalComponent */
+ void OnKill();
+ void OnExit();
public:
- static void TimerExpired(TAny* aPtr); /**< @internalComponent */
+ static void TimerExpired(TAny* aPtr);
/** @internalComponent */
inline void UnknownState(TInt aOp, TInt aParam)
@@ -418,10 +321,10 @@
TPriListLink iWaitLink; /**< @internalComponent */ // used to link thread into a wait queue
// TUint8 iBasePri; /**< @internalComponent */ // priority with no fast mutex held
// TUint8 iMutexPri; /**< @internalComponent */ // priority from held fast mutex
-// TUint8 iNominalPri; /**< @internalComponent */ // nominal priority of thread (excluding effect of higher level inheritance)
+// TUint8 iInitial; /**< @internalComponent */ // TRUE if this is an initial thread
TUint8 iLinkedObjType;
TUint8 i_ThrdAttr; /**< @internalComponent */
- TUint8 iInitial; /**< @internalComponent */ // TRUE if this is an initial thread
+ TUint8 iNThreadBaseSpare10;
TUint8 iFastMutexDefer; /**< @internalComponent */
NFastSemaphore iRequestSemaphore; /**< @internalComponent */
@@ -454,15 +357,14 @@
TAny* iExtraContext; /**< @internalComponent */ // parent FPSCR value (iExtraContextSize == -1), coprocessor context (iExtraContextSize > 0) or NULL
TInt iExtraContextSize; /**< @internalComponent */ // +ve=dynamically allocated, 0=none, -1=iExtraContext stores parent FPSCR value
- TUint8 iCoreCycling; /**< @internalComponent */ // this thread is currently cycling through all active cores
- TUint8 iRebalanceAttr; /**< @internalComponent */ // behaviour of load balancing wrt this thread
- TUint8 iNThreadBaseSpare4c; /**< @internalComponent */ // spare to allow growth while preserving BC
- TUint8 iNThreadBaseSpare4d; /**< @internalComponent */ // spare to allow growth while preserving BC
- TUint32 iNThreadBaseSpare5; /**< @internalComponent */ // spare to allow growth while preserving BC
TUint32 iNThreadBaseSpare6; /**< @internalComponent */ // spare to allow growth while preserving BC
TUint32 iNThreadBaseSpare7; /**< @internalComponent */ // spare to allow growth while preserving BC
TUint32 iNThreadBaseSpare8; /**< @internalComponent */ // spare to allow growth while preserving BC
TUint32 iNThreadBaseSpare9; /**< @internalComponent */ // spare to allow growth while preserving BC
+
+ // For EMI support - HOPEFULLY THIS CAN DIE
+ TUint32 iTag; /**< @internalComponent */ // User defined set of bits which is ANDed with a mask when the thread is scheduled, and indicates if a DFC should be scheduled.
+ TAny* iVemsData; /**< @internalComponent */ // This pointer can be used by any VEMS to store any data associated with the thread. This data must be cleaned up before the Thread Exit Monitor completes.
};
__ASSERT_COMPILE(!(_FOFF(NThreadBase,iWaitLink)&7));
@@ -475,14 +377,12 @@
#define iBasePri iWaitLink.iSpare1 /**< @internalComponent */
#define iMutexPri iWaitLink.iSpare2 /**< @internalComponent */
-#define iNominalPri iWaitLink.iSpare3 /**< @internalComponent */
-#define i_NThread_Initial iInitial /**< @internalComponent */
+#define i_NThread_Initial iWaitLink.iSpare3 /**< @internalComponent */
#endif
/** @internalComponent */
-#define i_NThread_BasePri iWaitLink.iSpare1
-#define i_NThread_NominalPri iWaitLink.iSpare3
+#define i_NThread_BasePri iWaitLink.iSpare1
/** @internalComponent */
#define NTHREADBASE_CPU_AFFINITY_MASK 0x80000000
@@ -507,7 +407,6 @@
NThreadGroup();
public:
TInt iThreadCount; /**< @internalComponent */
- TDfc* iDestructionDfc; /**< @internalComponent */
TPriList<NThreadBase, KNumPriorities> iNThreadList; /**< @internalComponent */
};
@@ -515,49 +414,25 @@
* Scheduler
********************************************/
-#include <nk_plat.h>
-
-/**
-@internalComponent
-*/
-enum
- {
- EQueueEvent_Kick=1,
- EQueueEvent_WakeUp=2,
- };
-
/**
@internalComponent
*/
class TScheduler;
class NThread;
class NIrqHandler;
-struct SIdlePullThread;
-class TSubScheduler
+class TSubScheduler : public TPriListBase
{
public:
TSubScheduler();
void QueueDfcs();
void RotateReadyList(TInt aPriority);
NThread* SelectNextThread();
- TInt QueueEvent(NEventHandler* aEvent);
+ TBool QueueEvent(NEventHandler* aEvent);
void QueueEventAndKick(NEventHandler* aEvent);
void SaveTimesliceTimer(NThreadBase* aThread);
void UpdateThreadTimes(NThreadBase* aOld, NThreadBase* aNew);
- void SSAddEntry(NSchedulable* aEntry);
- void SSAddEntryHead(NSchedulable* aEntry);
- void SSRemoveEntry(NSchedulable* aEntry);
- void SSChgEntryP(NSchedulable* aEntry, TInt aNewPriority);
- void IdlePullSearch(SIdlePullThread& a, TSubScheduler* aDest);
- void GetLbThreads(SDblQue& aQ);
- TBool Detached(); // platform specific
-
- inline TInt HighestPriority()
- { return iSSList.HighestPriority(); }
- inline NSchedulable* EntryAtPriority(TInt aPri)
- { return (NSchedulable*)iSSList.iQueue[aPri]; }
private:
- TPriList<NSchedulable, KNumPriorities> iSSList;
+ SDblQueLink* iExtraQueues[KNumPriorities-1];
public:
TSpinLock iExIDfcLock; // lock to protect exogenous IDFC queue
@@ -581,19 +456,25 @@
TUint8 iInIDFC; // TRUE if IDFCs are currently being run on this CPU
volatile TUint8 iEventHandlersPending; // TRUE if an event handler is pending on this CPU
- TUint8 iCCSyncPending;
- TUint8 iLbCounter;
+ TUint8 iSubSchedulerSpare4;
+ TUint8 iSubSchedulerSpare5;
TAny* iAddressSpace;
TUint32 iReschedIPIs;
TScheduler* iScheduler;
- TInt iDeferShutdown; // counts reasons why this CPU can't shut down
- TInt iRdyThreadCount; // number of ready threads excluding idle thread
- TUint16 iPriClassThreadCount[KNumPriClasses];
+ union
+ {
+ TUint64 iLastTimestamp64; // NKern::Timestamp() value at last reschedule or timestamp sync
+ TUint32 iLastTimestamp32[2];
+ };
+ union
+ {
+ TUint64 iReschedCount64;
+ TUint32 iReschedCount32[2];
+ };
- TUint64HL iLastTimestamp; // timestamp at which last reschedule occurred
- TUint64HL iReschedCount;
+ TAny* iExtras[24]; // Space for platform-specific extras
TGenericIPI* iNextIPI; // next generic IPI to run on this CPU
NThread* iInitialThread; // Initial (idle) thread on this CPU
@@ -604,32 +485,15 @@
TUint64 iSpinLockOrderCheck; // bitmask showing which spinlock orders currently held
- TSubSchedulerX iSSX; // platform specific extras
-
- volatile TAny* iUncached; // points to platform specific uncached data structure
- TUint iMadeReadyCounter; // Number of times this core made a thread ready.
-
- TUint iMadeUnReadyCounter; // Number of times this core made a thread unready.
- TUint iTimeSliceExpireCounter; // Number of times this core hass reschedualed due to time slice exireation.
-
- TUint32 iSubSchedulerPadding[70];
- SDblQue iLbQ; // threads to be considered by subsequent periodic load balance
-
- TAny* iSubSchedScratch[16]; // For use by code outside NKern
+ TUint32 iSubSchedulerPadding[8];
};
-const TInt KSubSchedulerShift = 10; // log2(sizeof(TSubScheduler))
-
__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iExIDfcLock)&7));
__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iEventHandlerLock)&7));
__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iReadyListLock)&7));
-__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iLastTimestamp)&7));
-__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iReschedCount)&7));
-__ASSERT_COMPILE(sizeof(TSubSchedulerX)==256);
-__ASSERT_COMPILE(sizeof(TSubScheduler)==(1<<KSubSchedulerShift)); // make it a nice power of 2 size for easy indexing
-
-struct SCoreControlAction;
-struct SVariantInterfaceBlock;
+__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iLastTimestamp64)&7));
+__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iReschedCount64)&7));
+__ASSERT_COMPILE(sizeof(TSubScheduler)==512); // make it a nice power of 2 size for easy indexing
/**
@internalComponent
@@ -641,65 +505,22 @@
static void Reschedule();
IMPORT_C static TScheduler* Ptr();
inline void SetProcessHandler(TLinAddr aHandler) {iProcessHandler=aHandler;}
- void PeriodicBalance();
- TBool ReBalance(SDblQue& aQ, TBool aCC);
- void CCReactivate(TUint32 aMore);
- void CCIpiReactivate();
- void CCRequest();
- void GetLbThreads(SDblQue& aQ);
- void CCUnDefer();
- void ChangeThreadAcceptCpus(TUint32 aNewMask);
- TUint32 ReschedInactiveCpus(TUint32 aMask);
- void InitCCAction(SCoreControlAction& aA);
- TUint32 ModifyCCState(TUint32 aAnd, TUint32 aXor);
- TUint32 CpuShuttingDown(TSubScheduler& aSS);
- void AllCpusIdle();
- void FirstBackFromIdle();
-
- void InitLB();
- void StartRebalanceTimer(TBool aRestart);
- void StopRebalanceTimer(TBool aTemp);
- static void BalanceTimerExpired(TAny*);
- static void StartPeriodicBalancing();
- static void CCSyncDone(TAny*);
- static void CCReactivateDfcFn(TAny*);
- static void CCRequestDfcFn(TAny*);
- static void CCIpiReactivateFn(TAny*);
- static TDfcQue* RebalanceDfcQ();
- static NThread* LBThread();
- static TBool CoreControlSupported();
- static void CCInitiatePowerUp(TUint32 aCores);
- static void CCIndirectPowerDown(TAny*);
- static void DoFrequencyChanged(TAny*);
public:
TLinAddr iMonitorExceptionHandler;
TLinAddr iProcessHandler;
- volatile TUint32 iThreadAcceptCpus; // bit n set if CPU n is accepting unlocked threads
- volatile TUint32 iIpiAcceptCpus; // bit n set if CPU n is accepting generic IPIs
- volatile TUint32 iCpusComingUp; // bit n set if CPU n is in the process of powering up
- volatile TUint32 iCpusGoingDown; // bit n set if CPU n is in the process of powering down and is no longer accepting IPIs
- volatile TInt iCCDeferCount; // >0 means CPUs on the way down will stop just before the 'point of no return'
- volatile TUint32 iCCSyncCpus; // bit n set if CPU n has not yet observed a change to iThreadAcceptCpus
- volatile TUint32 iCCReactivateCpus;
- volatile TUint32 iCCState;
+ TLinAddr iRescheduleHook;
+ TUint32 iActiveCpus1; // bit n set if CPU n is accepting unlocked threads
+ TUint32 iActiveCpus2; // bit n set if CPU n is accepting generic IPIs
TInt iNumCpus; // number of CPUs under the kernel's control
- TLinAddr iRescheduleHook;
-
- SDblQue iGenIPIList; // list of active generic IPIs
- TSpinLock iGenIPILock; // spin lock protects iGenIPIList, also iIpiAcceptCpus, iCpusComingUp, iCpusGoingDown, iCCDeferCount
TSubScheduler* iSub[KMaxCpus]; // one subscheduler per CPU
- TAny* iSchedScratch[16]; // for use by code outside NKern
-
- TSchedulerX iSX; // platform specific extras
+ TAny* iExtras[24]; // Space for platform-specific extras
NFastMutex iLock; // the 'system lock' fast mutex
- TSpinLock iIdleBalanceLock;
-
TSpinLock iIdleSpinLock; // lock to protect list of DFCs to be run on idle
SDblQue iIdleDfcs; // list of DFCs to run when all CPUs go idle
@@ -707,44 +528,27 @@
TUint32 iCpusNotIdle; // bitmask - Bit n set => CPU n is not idle
TUint8 iIdleGeneration; // Toggles between 0 and 1 each time iIdleDfcs list is spilled to a CPU IDFC queue
TUint8 iIdleSpillCpu; // Which CPU last spilled the iIdleDfcs list to its IDFC queue
- TUint8 iLbCounter;
- volatile TUint8 iNeedBal;
+ TUint8 iTSchedulerSpare1;
+ TUint8 iTSchedulerSpare2;
TUint32 iIdleGenerationCount; // Incremented each time iIdleDfcs list is spilled to a CPU IDFC queue
- TDfcQue* iRebalanceDfcQ;
-
- TSpinLock iEnumerateLock; // lock to protect iAllThreads, iAllGroups
- SIterDQ iAllThreads; // list of all nanokernel threads in order of creation
- SIterDQ iAllGroups; // list of all thread groups in order of creation
- TSpinLock iBalanceListLock; // lock to protect iBalanceList
- TUint64 iLastBalanceTime; // time at which last rebalance occurred
- SDblQue iBalanceList; // list of threads/groups for load balancing
- NTimer iBalanceTimer; // triggers periodic rebalancing
- TDfc iCCSyncIDFC; // runs when a change to iThreadAcceptCpus has been observed by all CPUs
- TDfc iCCReactivateDfc; // runs when a reschedule IPI is targeted to an inactive CPU
+ TUint32 i_Scheduler_Padding[3];
- TUint32 iCCRequestLevel; // Number of active cores last requested
- volatile TUint32 iCCIpiReactivate; // Cores to be woken up because of IPIs
-
- TDfc iCCRequestDfc; // runs when a request is made to change the number of active cores
- TDfc iCCPowerDownDfc; // runs when indirect power down of core(s) is required
- TDfc iCCIpiReactIDFC; // runs when an IPI needs to wake up a core
- TDfc iFreqChgDfc; // runs when frequency changes required
-
- TSubScheduler* iPoweringOff; // CPU last to power off
- TUint32 iDetachCount; // detach count before power off
-
- SVariantInterfaceBlock* iVIB;
- TUint32 i_Scheduler_Padding[29];
+ // For EMI support - HOPEFULLY THIS CAN DIE
+ NThread* iSigma;
+ TDfc* iEmiDfc;
+ TUint32 iEmiMask;
+ TUint32 iEmiState;
+ TUint32 iEmiDfcTrigger;
+ TBool iLogging;
+ TAny* iBufferStart;
+ TAny* iBufferEnd;
+ TAny* iBufferTail;
+ TAny* iBufferHead;
};
-__ASSERT_COMPILE(!(_FOFF(TScheduler,iGenIPILock)&7));
__ASSERT_COMPILE(!(_FOFF(TScheduler,iIdleSpinLock)&7));
-__ASSERT_COMPILE(!(_FOFF(TScheduler,iIdleBalanceLock)&7));
-__ASSERT_COMPILE(!(_FOFF(TScheduler,iEnumerateLock)&7));
-__ASSERT_COMPILE(!(_FOFF(TScheduler,iBalanceListLock)&7));
-__ASSERT_COMPILE(sizeof(TSchedulerX)==32*4);
-__ASSERT_COMPILE(sizeof(TScheduler)==1024);
+__ASSERT_COMPILE(sizeof(TScheduler)==512);
extern TScheduler TheScheduler;
extern TSubScheduler TheSubSchedulers[KMaxCpus];
@@ -770,6 +574,11 @@
/**
@internalComponent
*/
+extern "C" void send_resched_ipis(TUint32 aMask);
+
+/**
+@internalComponent
+*/
extern "C" void send_resched_ipi(TInt aCpu);
/**
@@ -777,6 +586,9 @@
*/
extern "C" void send_resched_ipi_and_wait(TInt aCpu);
+
+#include <nk_plat.h>
+
/**
Call with kernel locked
@@ -807,20 +619,6 @@
return aAffinity==(TUint32)aCpu;
}
-/** @internalComponent */
-inline TBool CheckCpuAgainstAffinity(TInt aCpu, TUint32 aAffinity, TUint32 aActive)
- {
- if (aAffinity & NTHREADBASE_CPU_AFFINITY_MASK)
- return aActive & aAffinity & (1<<aCpu);
- return (aAffinity==(TUint32)aCpu) && (aActive & (1<<aCpu));
- }
-
-/** @internalComponent */
-inline TUint32 AffinityToMask(TUint32 aAffinity)
- {
- return (aAffinity & NTHREADBASE_CPU_AFFINITY_MASK) ? (aAffinity & ~NTHREADBASE_CPU_AFFINITY_MASK) : (1u<<aAffinity);
- }
-
/**
@internalComponent
*/
@@ -896,20 +694,7 @@
public:
STimerQ iTickQ[ENumTimerQueues]; /**< @internalComponent */ // NOTE: the order of member data is important
TUint32 iPresent; /**< @internalComponent */ // The assembler code relies on it
-
- /**
- This member is intended for use by ASSP/variant interrupt code as a convenient
- location to store the value of a free running counter at the point where the
- system tick is started.
- @publishedPartner
- @prototype
- */
- TUint32 iFRCOffset;
-
- union {
- TUint32 iMsCount; /**< @internalComponent */
- TUint64 iMsCount64; /**< @internalComponent */
- };
+ TUint32 iMsCount; /**< @internalComponent */
SDblQue iHoldingQ; /**< @internalComponent */
SDblQue iOrderedQ; /**< @internalComponent */
SDblQue iCompletedQ; /**< @internalComponent */
@@ -968,6 +753,17 @@
/**
@internalComponent
*/
+class TGenIPIList : public SDblQue
+ {
+public:
+ TGenIPIList();
+public:
+ TSpinLock iGenIPILock;
+ };
+
+/**
+@internalComponent
+*/
class TCancelIPI : public TGenericIPI
{
public:
@@ -1001,24 +797,25 @@
/**
@internalComponent
*/
-#define MASK_NO_FAST_MUTEX 0x1
-#define MASK_CRITICAL 0x2
-#define MASK_NO_CRITICAL 0x4
-#define MASK_KERNEL_LOCKED 0x8
-#define MASK_KERNEL_UNLOCKED 0x10
-#define MASK_KERNEL_LOCKED_ONCE 0x20
-#define MASK_INTERRUPTS_ENABLED 0x40
-#define MASK_INTERRUPTS_DISABLED 0x80
-#define MASK_SYSTEM_LOCKED 0x100
-#define MASK_NOT_ISR 0x400
-#define MASK_NOT_IDFC 0x800
-#define MASK_NOT_THREAD 0x1000
-#define MASK_NO_CRITICAL_IF_USER 0x2000
-#define MASK_ALWAYS_FAIL 0x4000
-#define MASK_NO_RESCHED 0x8000
-#define MASK_NO_KILL_OR_SUSPEND 0x10000
-#define MASK_THREAD_STANDARD ( MASK_NO_FAST_MUTEX | MASK_KERNEL_UNLOCKED | MASK_INTERRUPTS_ENABLED | MASK_NOT_ISR | MASK_NOT_IDFC )
-#define MASK_THREAD_CRITICAL ( MASK_THREAD_STANDARD | MASK_CRITICAL )
+#define MASK_NO_FAST_MUTEX 0x1
+#define MASK_CRITICAL 0x2
+#define MASK_NO_CRITICAL 0x4
+#define MASK_KERNEL_LOCKED 0x8
+#define MASK_KERNEL_UNLOCKED 0x10
+#define MASK_KERNEL_LOCKED_ONCE 0x20
+#define MASK_INTERRUPTS_ENABLED 0x40
+#define MASK_INTERRUPTS_DISABLED 0x80
+#define MASK_SYSTEM_LOCKED 0x100
+#define MASK_NOT_ISR 0x400
+#define MASK_NOT_IDFC 0x800
+#define MASK_NOT_THREAD 0x1000
+#define MASK_NO_CRITICAL_IF_USER 0x2000
+#define MASK_ALWAYS_FAIL 0x4000
+#define MASK_NO_RESCHED 0x8000
+#define MASK_NO_KILL_OR_SUSPEND 0x10000
+
+#define MASK_THREAD_STANDARD ( MASK_NO_FAST_MUTEX | MASK_KERNEL_UNLOCKED | MASK_INTERRUPTS_ENABLED | MASK_NOT_ISR | MASK_NOT_IDFC )
+#define MASK_THREAD_CRITICAL ( MASK_THREAD_STANDARD | MASK_CRITICAL )
#if defined(__STANDALONE_NANOKERNEL__) || (!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
#define CHECK_PRECONDITIONS(mask,function)