kernel/eka/include/nkernsmp/nk_priv.h
       
     1 // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
       
     2 // All rights reserved.
       
     3 // This component and the accompanying materials are made available
       
     4 // under the terms of the License "Eclipse Public License v1.0"
       
     5 // which accompanies this distribution, and is available
       
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     7 //
       
     8 // Initial Contributors:
       
     9 // Nokia Corporation - initial contribution.
       
    10 //
       
    11 // Contributors:
       
    12 //
       
    13 // Description:
       
    14 // e32\include\nkernsmp\nk_priv.h
       
    15 // 
       
    16 // WARNING: This file contains some APIs which are internal and are subject
       
    17 //          to change without notice. Such APIs should therefore not be used
       
    18 //          outside the Kernel and Hardware Services package.
       
    19 //
       
    20 
       
    21 #ifndef __NK_PRIV_H__
       
    22 #define __NK_PRIV_H__
       
    23 #include <cpudefs.h>
       
    24 #include <nkern.h>
       
    25 
       
    26 #define __USE_BTRACE_LOCK__
       
    27 
       
    28 class Monitor;
       
    29 
       
    30 /********************************************
       
    31  * Schedulable = thread or thread group
       
    32  ********************************************/
       
    33 
       
    34 /**
       
    35 @publishedPartner
       
    36 @prototype
       
    37 
       
    38 Base class for a nanokernel thread or group
       
    39 */
       
    40 class NThreadGroup;
       
    41 class NSchedulable : public TPriListLink
       
    42 	{
       
    43 public:
       
    44 	enum
       
    45 		{
       
    46 		EReadyGroup=1,
       
    47 		EReadyCpuMask=0x7f,
       
    48 		EReadyOffset=0x80,
       
    49 		};
       
    50 
       
    51 	enum NReadyFlags
       
    52 		{
       
    53 		ENewTimeslice=1,
       
    54 		EPreferSameCpu=2,
       
    55 		EUnPause=4,
       
    56 		};
       
    57 
       
    58 	enum NEventState
       
    59 		{
       
    60 		EEventCountShift=16u,
       
    61 		EEventCountMask=0xffff0000u,
       
    62 		EEventCountInc=0x10000u,
       
    63 		EEventCpuShift=0u,
       
    64 		EEventCpuMask=0x1fu,
       
    65 		EThreadCpuShift=8u,
       
    66 		EThreadCpuMask=0x1f00u,
       
    67 		EDeferredReady=0x4000u,
       
    68 		EEventParent=0x8000u,
       
    69 		};
       
    70 public:
       
    71 	NSchedulable();
       
    72 	void AcqSLock();
       
    73 	void RelSLock();
       
    74 	void LAcqSLock();
       
    75 	void RelSLockU();
       
    76 	void ReadyT(TUint aMode);					// make ready, assumes lock held
       
    77 	TInt BeginTiedEvent();
       
    78 	void EndTiedEvent();
       
    79 	TInt AddTiedEvent(NEventHandler* aEvent);
       
    80 	TBool TiedEventReadyInterlock(TInt aCpu);
       
    81 	void UnPauseT();							// decrement pause count and make ready if necessary
       
    82 	static void DeferredReadyIDfcFn(TAny*);
       
    83 	void DetachTiedEvents();
       
    84 public:
       
    85 	inline TBool IsGroup()			{return !iParent;}
       
    86 	inline TBool IsLoneThread()		{return iParent==this;}
       
    87 	inline TBool IsGroupThread()	{return iParent && iParent!=this;}
       
    88 public:
       
    89 //	TUint8				iReady;					/**< @internalComponent */	// flag indicating thread on ready list = cpu number | EReadyOffset
       
    90 //	TUint8				iCurrent;				/**< @internalComponent */	// flag indicating thread is running
       
    91 //	TUint8				iLastCpu;				/**< @internalComponent */	// CPU on which this thread last ran
       
    92 	TUint8				iPauseCount;			/**< @internalComponent */	// count of externally requested pauses extending a voluntary wait
       
    93 	TUint8				iSuspended;				/**< @internalComponent */	// flag indicating active external suspend (Not used for groups)
       
    94 	TUint8				iNSchedulableSpare1;	/**< @internalComponent */
       
    95 	TUint8				iNSchedulableSpare2;	/**< @internalComponent */
       
    96 
       
    97 	TUint8				iCpuChange;				/**< @internalComponent */	// flag showing CPU migration outstanding
       
    98 	TUint8				iStopping;				/**< @internalComponent */	// thread is exiting, thread group is being destroyed
       
    99 	TUint16				iFreezeCpu;				/**< @internalComponent */	// flag set if CPU frozen - count for groups
       
   100 	NSchedulable*		iParent;				/**< @internalComponent */	// Pointer to group containing thread, =this for normal thread, =0 for group
       
   101 
       
   102 	TUint32				iCpuAffinity;			/**< @internalComponent */
       
    103 	volatile TUint32	iEventState;			/**< @internalComponent */	// bits 16-31=count, 0-4=event CPU, 8-12=thread CPU, 14=deferred ready, 15=parent
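
	// Illustrative sketch only (not part of the original source): how the fields packed
	// into iEventState would be decoded, assuming the layout defined by the NEventState
	// enumeration above.
	//
	//   TUint32 s = iEventState;
	//   TInt   eventCpu   = (s & EEventCpuMask)   >> EEventCpuShift;	// bits 0-4
	//   TInt   threadCpu  = (s & EThreadCpuMask)  >> EThreadCpuShift;	// bits 8-12
	//   TUint  tiedEvents = (s & EEventCountMask) >> EEventCountShift;	// bits 16-31
	//   TBool  deferred   = s & EDeferredReady;				// bit 14
	//   TBool  useParent  = s & EEventParent;				// bit 15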
       
   104 
       
   105 	TSpinLock			iSSpinLock;				/**< @internalComponent */
       
   106 
       
   107 	SDblQue				iEvents;				/**< @internalComponent */	// doubly-linked list of tied events
       
   108 
       
   109 	TUint32				i_IDfcMem[sizeof(TDfc)/sizeof(TUint32)];	/**< @internalComponent */	// IDFC used to make thread ready after last tied event completes
       
   110 //	TDfc				iDeferredReadyIDfc;		/**< @internalComponent */	// IDFC used to make thread ready after last tied event completes
       
   111 
       
   112 	union
       
   113 		{
       
   114 		TUint64			iRunCount64;
       
   115 		TUint32			iRunCount32[2];
       
   116 		};
       
   117 	union
       
   118 		{
       
   119 		TUint64			iTotalCpuTime64;		/**< @internalComponent */	// total time spent running, in hi-res timer ticks
       
   120 		TUint32			iTotalCpuTime32[2];		/**< @internalComponent */	// total time spent running, in hi-res timer ticks
       
   121 		};
       
   122 	};
       
   123 
       
   124 __ASSERT_COMPILE(!(_FOFF(NSchedulable,iSSpinLock)&7));
       
   125 __ASSERT_COMPILE(!(_FOFF(NSchedulable,iRunCount64)&7));
       
   126 __ASSERT_COMPILE(!(_FOFF(NSchedulable,iTotalCpuTime64)&7));
       
   127 __ASSERT_COMPILE(!(sizeof(NSchedulable)&7));
       
   128 
       
   129 
       
   130 /**
       
   131 @internalComponent
       
   132 */
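// Returns TRUE if this is an ordinary DFC or IDFC (priority below KNumDfcPriorities),
// or an event-handler IDFC whose tied thread or group, if any, is not currently stopping.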
       
   133 inline TBool TDfc::IsValid()
       
   134 	{
       
   135 	if (iHType < KNumDfcPriorities)
       
   136 		return TRUE;
       
   137 	if (iHType != EEventHandlerIDFC)
       
   138 		return FALSE;
       
   139 	return !iTied || !iTied->iStopping;
       
   140 	}
       
   141 
       
   142 /********************************************
       
   143  * Thread
       
   144  ********************************************/
       
   145 
       
   146 /**
       
   147 @internalComponent
       
   148 */
       
   149 class NThreadWaitState
       
   150 	{
       
   151 private:
       
   152 	enum TWtStFlags
       
   153 		{
       
   154 		EWtStWaitPending		=0x01u,		// thread is about to wait
       
   155 		EWtStWaitActive			=0x02u,		// thread is actually blocked
       
   156 		EWtStTimeout			=0x04u,		// timeout is active on this wait
       
   157 		EWtStObstructed			=0x08u,		// wait is due to obstruction (e.g. mutex) rather than lack of work to do
       
   158 		EWtStDead				=0x80u,		// thread is dead
       
   159 		};
       
   160 private:
       
   161 	NThreadWaitState();
       
   162 	void SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj);
       
   163 	void SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj, TUint32 aTimeout);
       
   164 	void SetDead(TDfc* aKillDfc);
       
   165 	void CancelWait();
       
   166 	TInt DoWait();
       
   167 	static void TimerExpired(TAny*);
       
   168 	TInt UnBlockT(TUint aType, TAny* aWaitObj, TInt aReturnValue);
       
   169 	TUint32 ReleaseT(TAny*& aWaitObj, TInt aReturnValue);
       
   170 	void CancelTimerT();
       
   171 private:
       
   172 	inline NThreadBase* Thread();
       
   173 	inline TBool WaitPending()
       
   174 		{ return iWtC.iWtStFlags & (EWtStWaitPending|EWtStDead); }
       
   175 	inline TBool ThreadIsBlocked()
       
   176 		{ return iWtC.iWtStFlags & (EWtStWaitActive|EWtStDead); }
       
   177 	inline TBool ThreadIsDead()
       
   178 		{ return iWtC.iWtStFlags & EWtStDead; }
       
   179 private:
       
   180 	struct S
       
   181 		{
       
   182 		volatile TUint8			iWtStFlags;
       
   183 		volatile TUint8			iWtObjType;
       
   184 		volatile TUint8			iWtStSpare1;
       
   185 		volatile TUint8			iWtStSpare2;
       
   186 		union
       
   187 			{
       
   188 			TAny* volatile		iWtObj;
       
   189 			volatile TInt		iRetVal;
       
   190 			TDfc* volatile		iKillDfc;
       
   191 			};
       
   192 		};
       
   193 	union
       
   194 		{
       
   195 		S						iWtC;
       
   196 		volatile TUint32		iWtSt32[2];
       
   197 		volatile TUint64		iWtSt64;
       
   198 		};
       
   199 	NTimer						iTimer;
       
   200 private:
       
   201 	friend class NSchedulable;
       
   202 	friend class NThreadBase;
       
   203 	friend class NThread;
       
   204 	friend class TScheduler;
       
   205 	friend class TSubScheduler;
       
   206 	friend class TDfc;
       
   207 	friend class TDfcQue;
       
   208 	friend class NFastSemaphore;
       
   209 	friend class NFastMutex;
       
   210 	friend class NTimer;
       
   211 	friend class NTimerQ;
       
   212 	friend class NKern;
       
   213 	friend class Monitor;
       
   214 	friend class NKTest;
       
   215 	};
       
   216 
       
   217 /**
       
   218 @publishedPartner
       
   219 @prototype
       
   220 
       
   221 Base class for a nanokernel thread.
       
   222 */
       
   223 class TSubScheduler;
       
   224 class NThreadBase : public NSchedulable
       
   225 	{
       
   226 public:
       
   227     /**
       
   228     Defines the possible types of wait object
       
   229     */
       
   230 	enum NThreadWaitType
       
   231 		{
       
   232 		EWaitNone,
       
   233 		EWaitFastSemaphore,
       
   234 		EWaitFastMutex,
       
   235 		EWaitSleep,
       
   236 		EWaitBlocked,
       
   237 		EWaitDfc,
       
   238 		
       
   239 		ENumWaitTypes
       
   240 		};
       
   241 
       
   242 		
       
   243 	/**
       
   244 	@internalComponent
       
   245 	*/
       
   246 	enum NThreadCSFunction
       
   247 		{
       
   248 		ECSExitPending=-1,
       
   249 		ECSExitInProgress=-2,
       
   250 		ECSDivertPending=-3,
       
   251 		};
       
   252 
       
   253 	/**
       
   254 	@internalComponent
       
   255 	*/
       
   256 	enum NThreadTimeoutOp
       
   257 		{
       
   258 		ETimeoutPreamble=0,
       
   259 		ETimeoutPostamble=1,
       
   260 		ETimeoutSpurious=2,
       
   261 		};
       
   262 public:
       
   263 	NThreadBase();
       
   264 	TInt Create(SNThreadCreateInfo& anInfo,	TBool aInitial);
       
   265 	void UnReadyT();
       
   266 	TBool SuspendOrKill(TInt aCount);
       
   267 	TBool DoSuspendOrKillT(TInt aCount, TSubScheduler* aS);
       
   268 	TBool CancelTimerT();
       
   269 	void DoReleaseT(TInt aReturnCode, TUint aMode);
       
   270 	TBool CheckFastMutexDefer();
       
   271 	void DoCsFunctionT();
       
   272 	TBool Resume(TBool aForce);
       
   273 	IMPORT_C TBool Suspend(TInt aCount);		/**< @internalComponent */
       
   274 	IMPORT_C TBool Resume();					/**< @internalComponent */
       
   275 	IMPORT_C TBool ForceResume();				/**< @internalComponent */
       
   276 	IMPORT_C void Release(TInt aReturnCode, TUint aMode);	/**< @internalComponent */
       
   277 	IMPORT_C void RequestSignal();				/**< @internalComponent */
       
   278 	IMPORT_C void SetPriority(TInt aPriority);	/**< @internalComponent */
       
   279 	void SetMutexPriority(NFastMutex* aMutex);
       
   280 	void LoseInheritedPriorityT();
       
   281 	void ChangeReadyThreadPriority();
       
   282 	TUint32 SetCpuAffinity(TUint32 aAffinity);
       
   283 	TBool TiedEventLeaveInterlock();
       
   284 	TBool TiedEventJoinInterlock();
       
   285 	IMPORT_C void Kill();						/**< @internalComponent */
       
   286 	void Exit();
       
   287 	// hooks for platform-specific code
       
   288 	void OnKill(); 
       
   289 	void OnExit();
       
   290 public:
       
   291 	static void TimerExpired(TAny* aPtr);
       
   292 
       
   293 	/** @internalComponent */
       
   294 	inline void UnknownState(TInt aOp, TInt aParam)
       
   295 		{ (*iHandlers->iStateHandler)((NThread*)this,aOp,aParam); }
       
   296 
       
   297 	/** @internalComponent */
       
   298 	inline TUint8 Attributes()
       
   299 		{ return i_ThrdAttr; }
       
   300 
       
   301 	/** @internalComponent */
       
   302 	inline TUint8 SetAttributes(TUint8 aNewAtt)
       
   303 		{ return __e32_atomic_swp_ord8(&i_ThrdAttr, aNewAtt); }
       
   304 
       
   305 	/** @internalComponent */
       
   306 	inline TUint8 ModifyAttributes(TUint8 aClearMask, TUint8 aSetMask)
       
   307 		{ return __e32_atomic_axo_ord8(&i_ThrdAttr, (TUint8)~(aClearMask|aSetMask), aSetMask); }
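
	// Assuming __e32_atomic_axo_ord8(p,a1,a2) atomically performs *p = (*p & a1) ^ a2 and
	// returns the old value, the call above clears the bits in aClearMask and sets the bits
	// in aSetMask in a single atomic step:
	//   bit only in aClearMask : (b & 0) ^ 0 = 0  -> cleared
	//   bit in aSetMask        : (b & 0) ^ 1 = 1  -> set
	//   any other bit          : (b & 1) ^ 0 = b  -> unchanged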
       
   308 
       
   309 	/** @internalComponent */
       
   310 	inline void SetAddressSpace(TAny* a)
       
   311 		{ iAddressSpace=a; }
       
   312 
       
   313 	/** @internalComponent */
       
   314 	inline void SetExtraContext(TAny* a, TInt aSize)
       
   315 		{ iExtraContext = a; iExtraContextSize = aSize; }
       
   316 
       
   317 	/** @internalTechnology */
       
   318 	inline TBool IsDead()
       
   319 		{ return iWaitState.ThreadIsDead(); }
       
   320 public:
       
   321 	TPriListLink		iWaitLink;				/**< @internalComponent */	// used to link thread into a wait queue
       
   322 //	TUint8				iBasePri;				/**< @internalComponent */	// priority with no fast mutex held
       
   323 //	TUint8				iMutexPri;				/**< @internalComponent */	// priority from held fast mutex
       
   324 //	TUint8				iInitial;				/**< @internalComponent */	// TRUE if this is an initial thread
       
   325 	TUint8				iLinkedObjType;
       
   326 	TUint8				i_ThrdAttr;				/**< @internalComponent */
       
   327 	TUint8				iNThreadBaseSpare10;
       
   328 	TUint8				iFastMutexDefer;		/**< @internalComponent */
       
   329 
       
   330 	NFastSemaphore		iRequestSemaphore;		/**< @internalComponent */
       
   331 
       
   332 	TInt				iTime;					/**< @internalComponent */	// time remaining, 0 if expired
       
   333 	TInt				iTimeslice;				/**< @internalComponent */	// timeslice for this thread, -ve = no timeslicing
       
   334 
       
   335 	TLinAddr			iSavedSP;				/**< @internalComponent */
       
   336 	TAny*				iAddressSpace;			/**< @internalComponent */
       
   337 
       
   338 	NFastMutex* volatile iHeldFastMutex;		/**< @internalComponent */	// fast mutex held by this thread
       
   339 	TUserModeCallback* volatile iUserModeCallbacks;	/**< @internalComponent */	// Head of singly-linked list of callbacks
       
   340 	TAny* volatile		iLinkedObj;				/**< @internalComponent */	// object to which this thread is linked
       
   341 	NThreadGroup*		iNewParent;				/**< @internalComponent */	// group to join
       
   342 
       
   343 	const SFastExecTable* iFastExecTable;		/**< @internalComponent */
       
   344 	const SSlowExecEntry* iSlowExecTable;		/**< @internalComponent */	// points to first entry iEntries[0]
       
   345 
       
   346 	volatile TInt		iCsCount;				/**< @internalComponent */	// critical section count
       
   347 	volatile TInt		iCsFunction;			/**< @internalComponent */	// what to do on leaving CS: +n=suspend n times, 0=nothing, -1=exit
       
   348 
       
   349 	NThreadWaitState	iWaitState;				/**< @internalComponent */
       
   350 
       
   351 	const SNThreadHandlers* iHandlers;			/**< @internalComponent */	// additional thread event handlers
       
    352 	TInt				iSuspendCount;			/**< @internalComponent */	// negative of the number of times this thread has been suspended
       
   353 
       
   354 	TLinAddr			iStackBase;				/**< @internalComponent */
       
   355 	TInt				iStackSize;				/**< @internalComponent */
       
   356 
       
   357 	TAny*				iExtraContext;			/**< @internalComponent */	// parent FPSCR value (iExtraContextSize == -1), coprocessor context (iExtraContextSize > 0) or NULL
       
   358 	TInt				iExtraContextSize;		/**< @internalComponent */	// +ve=dynamically allocated, 0=none, -1=iExtraContext stores parent FPSCR value
       
   359 
       
   360 	TUint32				iNThreadBaseSpare6;		/**< @internalComponent */	// spare to allow growth while preserving BC
       
   361 	TUint32				iNThreadBaseSpare7;		/**< @internalComponent */	// spare to allow growth while preserving BC
       
   362 	TUint32				iNThreadBaseSpare8;		/**< @internalComponent */	// spare to allow growth while preserving BC
       
   363 	TUint32				iNThreadBaseSpare9;		/**< @internalComponent */	// spare to allow growth while preserving BC
       
   364 
       
   365 	// For EMI support - HOPEFULLY THIS CAN DIE
       
   366 	TUint32	iTag;							/**< @internalComponent */	// User defined set of bits which is ANDed with a mask when the thread is scheduled, and indicates if a DFC should be scheduled.
       
    367 	TAny* iVemsData;						/**< @internalComponent */	// This pointer can be used by any VEMS to store data associated with the thread. This data must be cleaned up before the Thread Exit Monitor completes.
       
   368 	};
       
   369 
       
   370 __ASSERT_COMPILE(!(_FOFF(NThreadBase,iWaitLink)&7));
       
   371 __ASSERT_COMPILE(!(sizeof(NThreadBase)&7));
       
   372 
       
   373 #ifdef __INCLUDE_NTHREADBASE_DEFINES__
       
   374 #define	iReady				iSpare1				/**< @internalComponent */
       
   375 #define	iCurrent			iSpare2				/**< @internalComponent */
       
   376 #define	iLastCpu			iSpare3				/**< @internalComponent */
       
   377 
       
   378 #define iBasePri			iWaitLink.iSpare1	/**< @internalComponent */
       
   379 #define	iMutexPri			iWaitLink.iSpare2	/**< @internalComponent */
       
   380 #define	i_NThread_Initial	iWaitLink.iSpare3	/**< @internalComponent */
       
   381 
       
   382 #endif
       
   383 
       
   384 /** @internalComponent */
       
   385 #define	i_NThread_BasePri	iWaitLink.iSpare1
       
   386 
       
   387 /** @internalComponent */
       
   388 #define	NTHREADBASE_CPU_AFFINITY_MASK	0x80000000
       
   389 
       
   390 /** @internalComponent */
       
   391 inline NThreadBase* NThreadWaitState::Thread()
       
   392 	{ return _LOFF(this, NThreadBase, iWaitState); }
       
   393 
       
   394 /********************************************
       
   395  * Thread group
       
   396  ********************************************/
       
   397 
       
   398 /**
       
   399 @publishedPartner
       
   400 @prototype
       
   401 
       
    402 A nanokernel thread group. A group is itself schedulable (it derives from NSchedulable) and contains one or more nanokernel threads.
       
   403 */
       
   404 class NThreadGroup : public NSchedulable
       
   405 	{
       
   406 public:
       
   407 	NThreadGroup();
       
   408 public:
       
   409 	TInt iThreadCount;										/**< @internalComponent */
       
   410 	TPriList<NThreadBase, KNumPriorities> iNThreadList;		/**< @internalComponent */
       
   411 	};
       
   412 
       
   413 /********************************************
       
   414  * Scheduler
       
   415  ********************************************/
       
   416 
       
   417 /**
       
   418 @internalComponent
       
   419 */
       
   420 class TScheduler;
       
   421 class NThread;
       
   422 class NIrqHandler;
       
   423 class TSubScheduler : public TPriListBase
       
   424 	{
       
   425 public:
       
   426 	TSubScheduler();
       
   427 	void QueueDfcs();
       
   428 	void RotateReadyList(TInt aPriority);
       
   429 	NThread* SelectNextThread();
       
   430 	TBool QueueEvent(NEventHandler* aEvent);
       
   431 	void QueueEventAndKick(NEventHandler* aEvent);
       
   432 	void SaveTimesliceTimer(NThreadBase* aThread);
       
   433 	void UpdateThreadTimes(NThreadBase* aOld, NThreadBase* aNew);
       
   434 private:
       
   435 	SDblQueLink*	iExtraQueues[KNumPriorities-1];
       
   436 public:
       
   437 	TSpinLock		iExIDfcLock;				// lock to protect exogenous IDFC queue
       
   438 
       
   439 	SDblQue			iExIDfcs;					// list of pending exogenous IDFCs (i.e. ones punted over from another CPU)
       
   440 
       
   441 	SDblQue			iDfcs;						// normal IDFC/DFC pending queue (only accessed by this CPU)
       
   442 
       
   443 	TDfc* volatile	iCurrentIDFC;				// pointer to IDFC currently running on this CPU
       
   444 	NThread*		iCurrentThread;				// the thread currently running on this CPU
       
   445 
       
   446 	TUint32			iCpuNum;
       
   447 	TUint32			iCpuMask;
       
   448 
       
   449 	TSpinLock		iReadyListLock;
       
   450 
       
   451 	volatile TUint8	iRescheduleNeededFlag;		// TRUE if a thread reschedule is pending
       
   452 	TUint8			iSubSchedulerSBZ1;			// always zero
       
   453 	volatile TUint8	iDfcPendingFlag;			// TRUE if a normal IDFC is pending
       
   454 	volatile TUint8	iExIDfcPendingFlag;			// TRUE if an exogenous IDFC is pending
       
   455 	TInt			iKernLockCount;				// how many times the current CPU has locked the kernel
       
   456 
       
   457 	TUint8			iInIDFC;					// TRUE if IDFCs are currently being run on this CPU
       
   458 	volatile TUint8	iEventHandlersPending;		// TRUE if an event handler is pending on this CPU
       
   459 	TUint8			iSubSchedulerSpare4;
       
   460 	TUint8			iSubSchedulerSpare5;
       
   461 	TAny*			iAddressSpace;
       
   462 
       
   463 	TUint32			iReschedIPIs;
       
   464 	TScheduler*		iScheduler;
       
   465 
       
   466 	union
       
   467 		{
       
   468 		TUint64		iLastTimestamp64;			// NKern::Timestamp() value at last reschedule or timestamp sync
       
   469 		TUint32		iLastTimestamp32[2];
       
   470 		};
       
   471 	union
       
   472 		{
       
   473 		TUint64		iReschedCount64;
       
   474 		TUint32		iReschedCount32[2];
       
   475 		};
       
   476 
       
   477 	TAny*			iExtras[24];				// Space for platform-specific extras
       
   478 
       
   479 	TGenericIPI*	iNextIPI;					// next generic IPI to run on this CPU
       
   480 	NThread*		iInitialThread;				// Initial (idle) thread on this CPU
       
   481 
       
   482 	TSpinLock		iEventHandlerLock;			// lock to protect event handler queue
       
   483 
       
   484 	SDblQue			iEventHandlers;				// queue of pending event handlers on this CPU
       
   485 
       
   486 	TUint64			iSpinLockOrderCheck;		// bitmask showing which spinlock orders currently held
       
   487 
       
   488 	TUint32			iSubSchedulerPadding[8];
       
   489 	};
       
   490 
       
   491 __ASSERT_COMPILE(!(_FOFF(TSubScheduler,iExIDfcLock)&7));
       
   492 __ASSERT_COMPILE(!(_FOFF(TSubScheduler,iEventHandlerLock)&7));
       
   493 __ASSERT_COMPILE(!(_FOFF(TSubScheduler,iReadyListLock)&7));
       
   494 __ASSERT_COMPILE(!(_FOFF(TSubScheduler,iLastTimestamp64)&7));
       
   495 __ASSERT_COMPILE(!(_FOFF(TSubScheduler,iReschedCount64)&7));
       
   496 __ASSERT_COMPILE(sizeof(TSubScheduler)==512);	// make it a nice power of 2 size for easy indexing
       
   497 
       
   498 /**
       
   499 @internalComponent
       
   500 */
       
   501 class TScheduler
       
   502 	{
       
   503 public:
       
   504 	TScheduler();
       
   505 	static void Reschedule();
       
   506 	IMPORT_C static TScheduler* Ptr();
       
   507 	inline void SetProcessHandler(TLinAddr aHandler) {iProcessHandler=aHandler;}
       
   508 public:
       
   509 	TLinAddr		iMonitorExceptionHandler;
       
   510 	TLinAddr		iProcessHandler;
       
   511 
       
   512 	TLinAddr		iRescheduleHook;
       
   513 	TUint32			iActiveCpus1;				// bit n set if CPU n is accepting unlocked threads
       
   514 
       
   515 	TUint32			iActiveCpus2;				// bit n set if CPU n is accepting generic IPIs
       
   516 	TInt			iNumCpus;					// number of CPUs under the kernel's control
       
   517 
       
   518 	TSubScheduler*	iSub[KMaxCpus];				// one subscheduler per CPU
       
   519 
       
   520 	TAny*			iExtras[24];				// Space for platform-specific extras
       
   521 
       
   522 	NFastMutex		iLock;						// the 'system lock' fast mutex
       
   523 
       
   524 	TSpinLock		iIdleSpinLock;				// lock to protect list of DFCs to be run on idle
       
   525 
       
   526 	SDblQue			iIdleDfcs;					// list of DFCs to run when all CPUs go idle
       
   527 
       
   528 	TUint32			iCpusNotIdle;				// bitmask - Bit n set => CPU n is not idle
       
   529 	TUint8			iIdleGeneration;			// Toggles between 0 and 1 each time iIdleDfcs list is spilled to a CPU IDFC queue
       
   530 	TUint8			iIdleSpillCpu;				// Which CPU last spilled the iIdleDfcs list to its IDFC queue
       
   531 	TUint8			iTSchedulerSpare1;
       
   532 	TUint8			iTSchedulerSpare2;
       
   533 
       
   534 	TUint32			iIdleGenerationCount;		// Incremented each time iIdleDfcs list is spilled to a CPU IDFC queue
       
   535 	TUint32			i_Scheduler_Padding[3];
       
   536 
       
   537 	// For EMI support - HOPEFULLY THIS CAN DIE
       
   538 	NThread* iSigma;	
       
   539 	TDfc* iEmiDfc;
       
   540 	TUint32 iEmiMask;
       
   541 	TUint32 iEmiState;
       
   542 	TUint32 iEmiDfcTrigger;
       
   543 	TBool iLogging;
       
   544 	TAny* iBufferStart;
       
   545 	TAny* iBufferEnd;
       
   546 	TAny* iBufferTail;
       
   547 	TAny* iBufferHead;
       
   548 	};
       
   549 
       
   550 __ASSERT_COMPILE(!(_FOFF(TScheduler,iIdleSpinLock)&7));
       
   551 __ASSERT_COMPILE(sizeof(TScheduler)==512);
       
   552 
       
   553 extern TScheduler TheScheduler;
       
   554 extern TSubScheduler TheSubSchedulers[KMaxCpus];
       
   555 
       
   556 #ifdef __USE_BTRACE_LOCK__
       
   557 extern TSpinLock BTraceLock;
       
   558 
       
   559 #define	__ACQUIRE_BTRACE_LOCK()			TInt _btrace_irq = BTraceLock.LockIrqSave()
       
   560 #define	__RELEASE_BTRACE_LOCK()			BTraceLock.UnlockIrqRestore(_btrace_irq)
       
   561 
       
   562 #else
       
   563 
       
   564 #define	__ACQUIRE_BTRACE_LOCK()
       
   565 #define	__RELEASE_BTRACE_LOCK()
       
   566 
       
   567 #endif
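
// Illustrative sketch only: the bracketing pattern these macros are intended for when
// emitting a trace record from kernel-side code (the middle line is a placeholder, not
// a specific trace API). When __USE_BTRACE_LOCK__ is not defined both macros expand to
// nothing, so the same pattern compiles either way.
//
//   __ACQUIRE_BTRACE_LOCK();
//   // ... format and output the BTrace record ...
//   __RELEASE_BTRACE_LOCK();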
       
   568 
       
   569 /**
       
   570 @internalComponent
       
   571 */
       
   572 extern "C" TSubScheduler& SubScheduler();
       
   573 
       
   574 /**
       
   575 @internalComponent
       
   576 */
       
   577 extern "C" void send_resched_ipis(TUint32 aMask);
       
   578 
       
   579 /**
       
   580 @internalComponent
       
   581 */
       
   582 extern "C" void send_resched_ipi(TInt aCpu);
       
   583 
       
   584 /**
       
   585 @internalComponent
       
   586 */
       
   587 extern "C" void send_resched_ipi_and_wait(TInt aCpu);
       
   588 
       
   589 
       
   590 #include <nk_plat.h>
       
   591 
       
   592 /**
       
   593 Call with kernel locked
       
   594 
       
   595 @internalComponent
       
   596 */
       
   597 inline void RescheduleNeeded()
       
   598 	{ SubScheduler().iRescheduleNeededFlag = 1; }
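
// Illustrative sketch only, assuming the usual NKern::Lock()/NKern::Unlock() bracketing:
// RescheduleNeeded() merely flags the current CPU; the reschedule itself is taken when
// the kernel lock is released.
//
//   NKern::Lock();
//   // ... e.g. make a higher-priority thread ready ...
//   RescheduleNeeded();
//   NKern::Unlock();	// the pending reschedule can occur here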
       
   599 
       
   600 
       
   601 /**
       
   602 @internalComponent
       
   603 */
       
   604 #define	NCurrentThread()	NKern::CurrentThread()
       
   605 
       
   606 /** Optimised current thread function which can only be called from places where
       
   607 	CPU migration is not possible - i.e. with interrupts disabled or preemption
       
   608 	disabled.
       
   609 
       
   610 @internalComponent
       
   611 */
       
   612 extern "C" NThread* NCurrentThreadL();
       
   613 
       
   614 /** @internalComponent */
       
   615 inline TBool CheckCpuAgainstAffinity(TInt aCpu, TUint32 aAffinity)
       
   616 	{
       
   617 	if (aAffinity & NTHREADBASE_CPU_AFFINITY_MASK)
       
   618 		return aAffinity & (1<<aCpu);
       
   619 	return aAffinity==(TUint32)aCpu;
       
   620 	}
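
// Worked examples for the function above, derived directly from its code:
//   CheckCpuAgainstAffinity(2, 2)          -> true   (affinity names CPU 2 exactly)
//   CheckCpuAgainstAffinity(1, 2)          -> false
//   CheckCpuAgainstAffinity(0, 0x80000003) -> true   (mask form: CPUs 0 and 1 allowed)
//   CheckCpuAgainstAffinity(2, 0x80000003) -> false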
       
   621 
       
   622 /**
       
   623 @internalComponent
       
   624 */
       
   625 #define __NK_ASSERT_UNLOCKED	__NK_ASSERT_DEBUG(!NKern::KernelLocked())
       
   626 
       
   627 /**
       
   628 @internalComponent
       
   629 */
       
   630 #define __NK_ASSERT_LOCKED		__NK_ASSERT_DEBUG(NKern::KernelLocked())
       
   631 
       
   632 #ifdef _DEBUG
       
   633 /**
       
   634 @publishedPartner
       
   635 @released
       
   636 */
       
   637 #define __ASSERT_NO_FAST_MUTEX	__NK_ASSERT_DEBUG(!NKern::HeldFastMutex());
       
   638 
       
   639 /**
       
   640 @publishedPartner
       
   641 @released
       
   642 */
       
   643 #define __ASSERT_FAST_MUTEX(m)	__NK_ASSERT_DEBUG((m)->HeldByCurrentThread());
       
   644 
       
   645 /**
       
   646 @publishedPartner
       
   647 @released
       
   648 */
       
   649 #define __ASSERT_SYSTEM_LOCK	__NK_ASSERT_DEBUG(TScheduler::Ptr()->iLock.HeldByCurrentThread());
       
   650 
       
   651 #define __ASSERT_NOT_ISR		__NK_ASSERT_DEBUG(NKern::CurrentContext()!=NKern::EInterrupt)
       
   652 
       
   653 #else
       
   654 #define __ASSERT_NO_FAST_MUTEX
       
   655 #define __ASSERT_FAST_MUTEX(m)
       
   656 #define	__ASSERT_SYSTEM_LOCK
       
   657 #define __ASSERT_NOT_ISR
       
   658 #endif
       
   659 
       
   660 /********************************************
       
   661  * System timer queue
       
   662  ********************************************/
       
   663 
       
   664 /**
       
   665 @publishedPartner
       
   666 @prototype
       
   667 */
       
   668 class NTimerQ
       
   669 	{
       
   670 	friend class NTimer;
       
   671 public:
       
   672 	typedef void (*TDebugFn)(TAny* aPtr, TInt aPos);	/**< @internalComponent */
       
   673 	enum { ETimerQMask=31, ENumTimerQueues=32 };		/**< @internalComponent */	// these are not easily modifiable
       
   674 
       
   675 	/** @internalComponent */
       
   676 	struct STimerQ
       
   677 		{
       
   678 		SDblQue iIntQ;
       
   679 		SDblQue iDfcQ;
       
   680 		};
       
   681 public:
       
   682 	NTimerQ();
       
   683 	static void Init1(TInt aTickPeriod);
       
   684 	static void Init3(TDfcQue* aDfcQ);
       
   685 	IMPORT_C static TAny* TimerAddress();
       
   686 	IMPORT_C void Tick();
       
   687 	IMPORT_C static TInt IdleTime();
       
   688 	IMPORT_C static void Advance(TInt aTicks);
       
   689 private:
       
   690 	static void DfcFn(TAny* aPtr);
       
   691 	void Dfc();
       
   692 	void Add(NTimer* aTimer);
       
   693 	void AddFinal(NTimer* aTimer);
       
   694 public:
       
   695 	STimerQ			iTickQ[ENumTimerQueues];	/**< @internalComponent */	// NOTE: the order of member data is important
       
   696 	TUint32			iPresent;					/**< @internalComponent */	// The assembler code relies on it
       
   697 	TUint32			iMsCount;					/**< @internalComponent */
       
   698 	SDblQue			iHoldingQ;					/**< @internalComponent */
       
   699 	SDblQue			iOrderedQ;					/**< @internalComponent */
       
   700 	SDblQue			iCompletedQ;				/**< @internalComponent */
       
   701 	TDfc			iDfc;						/**< @internalComponent */
       
   702 	TUint8			iTransferringCancelled;		/**< @internalComponent */
       
   703 	TUint8			iCriticalCancelled;			/**< @internalComponent */
       
   704 	TUint8			iPad1;						/**< @internalComponent */
       
   705 	TUint8			iPad2;						/**< @internalComponent */
       
   706 	TDebugFn		iDebugFn;					/**< @internalComponent */
       
   707 	TAny*			iDebugPtr;					/**< @internalComponent */
       
   708 	TInt			iTickPeriod;				/**< @internalComponent */	// in microseconds
       
   709 
       
   710 	/**
       
   711 	This member is intended for use by ASSP/variant interrupt code as a convenient
       
    712 	location to store rounding error information where the period of the hardware tick

    713 	interrupt is not exactly one millisecond. The Symbian kernel does not make any use of this member.
       
   714 	@publishedPartner
       
   715 	@prototype
       
   716 	*/
       
   717 	TInt			iRounding;
       
   718 	TInt			iDfcCompleteCount;			/**< @internalComponent */
       
   719 	TSpinLock		iTimerSpinLock;				/**< @internalComponent */
       
   720 	};
       
   721 
       
   722 __ASSERT_COMPILE(!(_FOFF(NTimerQ,iTimerSpinLock)&7));
       
   723 
       
   724 
       
   725 GLREF_D NTimerQ TheTimerQ;
       
   726 
       
   727 /**
       
   728 @internalComponent
       
   729 */
       
   730 inline TUint32 NTickCount()
       
   731 	{return TheTimerQ.iMsCount;}
       
   732 
       
   733 /**
       
   734 @internalComponent
       
   735 */
       
   736 inline TInt NTickPeriod()
       
   737 	{return TheTimerQ.iTickPeriod;}
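
// Illustrative sketch only: timing an interval with the two accessors above. iTickPeriod
// is in microseconds, so the product approximates elapsed microseconds (32-bit wrap-around
// and overflow of the product are ignored here).
//
//   TUint32 start = NTickCount();
//   // ... operation being timed ...
//   TUint32 elapsedUs = (NTickCount() - start) * (TUint32)NTickPeriod();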
       
   738 
       
   739 
       
   740 extern "C" {
       
   741 /**
       
   742 @internalComponent
       
   743 */
       
   744 extern void NKCrashHandler(TInt aPhase, const TAny* a0, TInt a1);
       
   745 
       
   746 /**
       
   747 @internalComponent
       
   748 */
       
   749 extern TUint32 CrashState;
       
   750 }
       
   751 
       
   752 
       
   753 /**
       
   754 @internalComponent
       
   755 */
       
   756 class TGenIPIList : public SDblQue
       
   757 	{
       
   758 public:
       
   759 	TGenIPIList();
       
   760 public:
       
   761 	TSpinLock			iGenIPILock;
       
   762 	};
       
   763 
       
   764 /**
       
   765 @internalComponent
       
   766 */
       
   767 class TCancelIPI : public TGenericIPI
       
   768 	{
       
   769 public:
       
   770 	void Send(TDfc* aDfc, TInt aCpu);
       
   771 	static void Isr(TGenericIPI*);
       
   772 public:
       
   773 	TDfc* volatile iDfc;
       
   774 	};
       
   775 
       
   776 
       
   777 /**
       
   778 @internalComponent
       
   779 */
       
   780 TBool InterruptsStatus(TBool aRequest);
       
   781 
       
   782 
       
   783 //declarations for the checking of kernel preconditions
       
   784 #ifdef _DEBUG
       
   785 
       
   786 /**
       
   787 @internalComponent
       
   788 */
       
   789 #define MASK_NO_FAST_MUTEX 0x1
       
   790 #define MASK_CRITICAL 0x2
       
   791 #define MASK_NO_CRITICAL 0x4
       
   792 #define MASK_KERNEL_LOCKED 0x8
       
   793 #define MASK_KERNEL_UNLOCKED 0x10
       
   794 #define MASK_KERNEL_LOCKED_ONCE 0x20
       
   795 #define MASK_INTERRUPTS_ENABLED 0x40
       
   796 #define MASK_INTERRUPTS_DISABLED 0x80
       
   797 #define MASK_SYSTEM_LOCKED 0x100
       
   798 #define MASK_NOT_ISR 0x400
       
   799 #define MASK_NOT_IDFC 0x800 
       
   800 #define MASK_NOT_THREAD 0x1000
       
   801 #define MASK_NO_CRITICAL_IF_USER 0x2000
       
   802 #define MASK_THREAD_STANDARD ( MASK_NO_FAST_MUTEX | MASK_KERNEL_UNLOCKED | MASK_INTERRUPTS_ENABLED | MASK_NOT_ISR | MASK_NOT_IDFC )
       
   803 #define MASK_THREAD_CRITICAL ( MASK_THREAD_STANDARD | MASK_CRITICAL )
       
   804 #define MASK_ALWAYS_FAIL 0x4000
       
   805 #define	MASK_NO_RESCHED 0x8000
       
   806 
       
   807 #if defined(__STANDALONE_NANOKERNEL__) || (!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
       
   808 #define CHECK_PRECONDITIONS(mask,function)
       
   809 #define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function)
       
   810 
       
   811 #else
       
   812 /**
       
   813 @internalComponent
       
   814 */
       
   815 extern "C" TInt CheckPreconditions(TUint32 aConditionMask, const char* aFunction, TLinAddr aAddr);
       
   816 /**
       
   817 @internalComponent
       
   818 */
       
   819 #define CHECK_PRECONDITIONS(mask,function) CheckPreconditions(mask,function,0)
       
   820 
       
   821 #ifdef __KERNEL_APIS_CONTEXT_CHECKS_FAULT__
       
   822 
       
   823 /**
       
   824 @internalComponent
       
   825 */
       
   826 #define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function) \
       
   827 			__ASSERT_DEBUG( (cond), ( \
       
   828 			DEBUGPRINT("Assertion failed: %s\nFunction: %s\n",message,function),\
       
   829 			NKFault(function, 0)))
       
   830 
       
   831 #else//!__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
       
   832 /**
       
   833 @internalComponent
       
   834 */
       
   835 #define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function) \
       
   836 			__ASSERT_DEBUG( (cond), \
       
   837 			DEBUGPRINT("Assertion failed: %s\nFunction: %s\n",message,function))
       
   838 
       
   839 
       
   840 #endif//__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
       
   841 #endif//(!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
       
   842 
       
   843 #else//if !DEBUG
       
   844 
       
   845 #define CHECK_PRECONDITIONS(mask,function)
       
   846 #define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function )
       
   847 
       
   848 #endif//_DEBUG
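
// Illustrative sketch only, with a hypothetical function name: how a kernel API that must
// run in a plain thread context typically guards itself. In release builds, or when the
// context checks are disabled, CHECK_PRECONDITIONS expands to nothing.
//
//   void NKern::SomeApi()
//       {
//       CHECK_PRECONDITIONS(MASK_THREAD_STANDARD, "NKern::SomeApi");
//       // ... body ...
//       }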
       
   849 
       
   850 #if (!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
       
   851 #define __ASSERT_WITH_MESSAGE_ALWAYS(cond,message,function )
       
   852 #else
       
   853 #ifdef __KERNEL_APIS_CONTEXT_CHECKS_FAULT__
       
   854 /**
       
   855 @internalComponent
       
   856 */
       
   857 #define __ASSERT_WITH_MESSAGE_ALWAYS(cond,message,function) \
       
   858 			__ASSERT_ALWAYS( (cond), ( \
       
   859 			DEBUGPRINT("Assertion failed: %s\nFunction: %s\n",message,function),\
       
   860 			NKFault(function, 0)))
       
   861 #else
       
   862 /**
       
   863 @internalComponent
       
   864 */
       
   865 #define __ASSERT_WITH_MESSAGE_ALWAYS(cond,message,function) \
       
   866 			__ASSERT_ALWAYS( (cond), \
       
   867 			DEBUGPRINT("Assertion failed: %s\nFunction: %s\n",message,function))
       
   868 #endif//__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
       
   869 #endif//(!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
       
   870 
       
   871 #endif