diff -r 000000000000 -r 96e5fb8b040d kernel/eka/include/nkernsmp/nkern.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/kernel/eka/include/nkernsmp/nkern.h Thu Dec 17 09:24:54 2009 +0200 @@ -0,0 +1,1236 @@ +// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies). +// All rights reserved. +// This component and the accompanying materials are made available +// under the terms of the License "Eclipse Public License v1.0" +// which accompanies this distribution, and is available +// at the URL "http://www.eclipse.org/legal/epl-v10.html". +// +// Initial Contributors: +// Nokia Corporation - initial contribution. +// +// Contributors: +// +// Description: +// e32\include\nkernsmp\nkern.h +// +// WARNING: This file contains some APIs which are internal and are subject +// to change without notice. Such APIs should therefore not be used +// outside the Kernel and Hardware Services package. +// + +#ifndef __NKERN_H__ +#define __NKERN_H__ + +#ifdef __STANDALONE_NANOKERNEL__ +#undef __IN_KERNEL__ +#define __IN_KERNEL__ +#endif + +#include +#include +#include +#include +#include +#include + +extern "C" { +/** @internalComponent */ +IMPORT_C void NKFault(const char* file, TInt line); +/** @internalComponent */ +void NKIdle(TInt aStage); +} + +/** +@publishedPartner +@released +*/ +#define FAULT() NKFault(__FILE__,__LINE__) + +#ifdef _DEBUG + +/** +@publishedPartner +@released +*/ +#define __NK_ASSERT_DEBUG(c) ((void) ((c)||(FAULT(),0)) ) + +#else + +#define __NK_ASSERT_DEBUG(c) + +#endif + +/** +@publishedPartner +@released +*/ +#define __NK_ASSERT_ALWAYS(c) ((void) ((c)||(FAULT(),0)) ) + +/** + @publishedPartner + @released +*/ +const TInt KNumPriorities=64; + +const TInt KMaxCpus=8; + +class NSchedulable; +class NThread; +class NThreadGroup; + + +/** Spin lock + + Used for protecting a code fragment against both interrupts and concurrent + execution on another processor. 
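+
+	For example, a minimal illustrative sketch (the lock name, order value and
+	protected data are assumptions, not part of this header; choose an order
+	from the list below):
+
+	@code
+	static TSpinLock DriverLock(TSpinLock::EOrderGenericIrqLow0);	// device-driver lock, IRQ range
+
+	void UpdateSharedState(TUint aValue)
+		{
+		TInt irq = __SPIN_LOCK_IRQSAVE(DriverLock);	// disable interrupts and acquire the lock
+		// ... modify state shared with an ISR here ...
+		__SPIN_UNLOCK_IRQRESTORE(DriverLock, irq);	// release the lock, restore interrupt state
+		}
+	@endcode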
+
+	List of spin locks in the nanokernel, in deadlock-prevention order:
+	A	NEventHandler::TiedLock (preemption)
+	B	NFastMutex spin locks (preemption)
+	C	Thread spin locks (preemption)
+	D	Thread group spin locks (preemption)
+	E	Per-CPU ready list lock (preemption)
+
+	a	Idle DFC list lock (interrupts)
+	b	Per-CPU exogenous IDFC queue lock (interrupts)
+	c	NTimerQ spin lock (interrupts)
+	d	Generic IPI list locks (interrupts)
+	e	NIrq spin locks (interrupts)
+	f	Per-CPU event handler list lock (interrupts)
+	z	BTrace lock (interrupts)
+
+	z must have the lowest order, since BTrace calls can appear anywhere.
+
+	Interrupt-disabling spin locks must have lower orders than
+	preemption-disabling ones.
+
+	Nestings which actually occur are:
+	A > C
+	B > C > D > E
+	c > f
+	Nothing (except possibly z) is nested inside a, b, d, f.
+	e is held while calling HW-poking functions (which might use other spin locks).
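+
+	The orders support a debug-build nesting check: a spin lock should only be
+	acquired while every spin lock already held has a higher order. An
+	illustrative sketch of a legal nesting (both lock names are assumptions):
+
+	@code
+	static TSpinLock OuterLock(TSpinLock::EOrderGenericIrqHigh0);	// order 0x18
+	static TSpinLock InnerLock(TSpinLock::EOrderGenericIrqLow0);	// order 0x00
+
+	void UpdateBoth()
+		{
+		TInt irq = __SPIN_LOCK_IRQSAVE(OuterLock);	// higher order acquired first
+		__SPIN_LOCK(InnerLock);						// lower order may nest inside
+		// ... update state guarded by both locks ...
+		__SPIN_UNLOCK(InnerLock);
+		__SPIN_UNLOCK_IRQRESTORE(OuterLock, irq);
+		}
+	@endcode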
+
+@publishedPartner
+@prototype
+*/
+class TSpinLock
+	{
+public:
+	enum TOrder
+		{
+		// Bit 7 of order clear for locks used with interrupts disabled
+		EOrderGenericIrqLow0	=0x00u,		// Device driver spin locks, low range
+		EOrderGenericIrqLow1	=0x01u,		// Device driver spin locks, low range
+		EOrderGenericIrqLow2	=0x02u,		// Device driver spin locks, low range
+		EOrderGenericIrqLow3	=0x03u,		// Device driver spin locks, low range
+		EOrderBTrace			=0x04u,		// BTrace lock
+		EOrderEventHandlerList	=0x07u,		// Per-CPU event handler list lock
+		EOrderCacheMaintenance	=0x08u,		// CacheMaintenance (for PL310)
+		EOrderNIrq				=0x0Au,		// NIrq lock
+		EOrderGenericIPIList	=0x0Du,		// Generic IPI list lock
+		EOrderNTimerQ			=0x10u,		// Nanokernel timer queue lock
+		EOrderExIDfcQ			=0x13u,		// Per-CPU exogenous IDFC queue list lock
+		EOrderIdleDFCList		=0x16u,		// Idle DFC list lock
+		EOrderGenericIrqHigh0	=0x18u,		// Device driver spin locks, high range
+		EOrderGenericIrqHigh1	=0x19u,		// Device driver spin locks, high range
+		EOrderGenericIrqHigh2	=0x1Au,		// Device driver spin locks, high range
+		EOrderGenericIrqHigh3	=0x1Bu,		// Device driver spin locks, high range
+
+		// Bit 7 of order set for locks used with interrupts enabled, preemption disabled
+		EOrderGenericPreLow0	=0x80u,		// Device driver spin locks, low range
+		EOrderGenericPreLow1	=0x81u,		// Device driver spin locks, low range
+		EOrderReadyList			=0x88u,		// Per-CPU ready list lock
+		EOrderThreadGroup		=0x90u,		// Thread group locks
+		EOrderThread			=0x91u,		// Thread locks
+		EOrderFastMutex			=0x98u,		// Fast mutex locks
+		EOrderEventHandlerTied	=0x9Cu,		// Event handler tied lock
+		EOrderGenericPreHigh0	=0x9Eu,		// Device driver spin locks, high range
+		EOrderGenericPreHigh1	=0x9Fu,		// Device driver spin locks, high range
+
+		EOrderNone				=0xFFu		// No order check required (e.g. for dynamic ordering)
+		};
+public:
+	IMPORT_C TSpinLock(TUint aOrder);
+	IMPORT_C void LockIrq();				/**< @internalComponent disable interrupts and acquire the lock */
+	IMPORT_C void UnlockIrq();				/**< @internalComponent release the lock and enable interrupts */
+	IMPORT_C TBool FlashIrq();				/**< @internalComponent if someone else is waiting for the lock, UnlockIrq() then LockIrq() */
+	IMPORT_C void LockOnly();				/**< @internalComponent acquire the lock, assuming interrupts/preemption already disabled */
+	IMPORT_C void UnlockOnly();				/**< @internalComponent release the lock, don't change interrupt/preemption state */
+	IMPORT_C TBool FlashOnly();				/**< @internalComponent if someone else is waiting for the lock, UnlockOnly() then LockOnly() */
+	IMPORT_C TInt LockIrqSave();			/**< @internalComponent remember original interrupt state then disable interrupts and acquire the lock */
+	IMPORT_C void UnlockIrqRestore(TInt);	/**< @internalComponent release the lock then restore original interrupt state */
+	IMPORT_C TBool FlashIrqRestore(TInt);	/**< @internalComponent if someone else is waiting for the lock, UnlockIrqRestore() then LockIrq() */
+	IMPORT_C TBool FlashPreempt();			/**< @internalComponent if someone else is waiting for the lock, UnlockOnly(); NKern::PreemptionPoint(); LockOnly(); */
+private:
+	volatile TUint64 iLock;
+	};
+
+
+/** Macro to disable interrupts and acquire the lock.
+
+@publishedPartner
+@prototype
+*/
+#define __SPIN_LOCK_IRQ(lock)					((lock).LockIrq())
+
+/** Macro to release the lock and enable interrupts.
+
+@publishedPartner
+@prototype
+*/
+#define __SPIN_UNLOCK_IRQ(lock)					(lock).UnlockIrq()
+
+/** Macro to see if someone else is waiting for the lock, enabling IRQs
+	then disabling IRQs again.
+
+@publishedPartner
+@prototype
+*/
+#define __SPIN_FLASH_IRQ(lock)					(lock).FlashIrq()
+
+/** Macro to remember the original interrupt state, then disable interrupts
+	and acquire the lock.
+
+@publishedPartner
+@prototype
+*/
+#define __SPIN_LOCK_IRQSAVE(lock)				((lock).LockIrqSave())
+
+/** Macro to release the lock, then restore the original interrupt state to that
+	supplied.
+
+@publishedPartner
+@prototype
+*/
+#define __SPIN_UNLOCK_IRQRESTORE(lock,irq)		(lock).UnlockIrqRestore(irq)
+
+/** Macro to see if someone else is waiting for the lock, restoring IRQs to
+	the original state supplied, then disabling IRQs again.
+
+@publishedPartner
+@prototype
+*/
+#define __SPIN_FLASH_IRQRESTORE(lock,irq)		(lock).FlashIrqRestore(irq)
+
+/** Macro to acquire the lock. This assumes the caller has already disabled
+	interrupts/preemption.
+
+	If interrupts/preemption are not disabled, a run-time assert will occur.
+	This is to protect against unsafe code that might lead to same-core
+	deadlock.
+
+	In device driver code it is safer to use __SPIN_LOCK_IRQSAVE() instead,
+	although it is not as efficient should interrupts already be disabled for
+	the duration the lock is held.
+
+@publishedPartner
+@prototype
+*/
+#define __SPIN_LOCK(lock)						((lock).LockOnly())
+
+/** Macro to release the lock without changing the interrupt/preemption state.
+
+@publishedPartner
+@prototype
+*/
+#define __SPIN_UNLOCK(lock)						(lock).UnlockOnly()
+
+/**
+@internalComponent
+*/
+#define __SPIN_FLASH(lock)						(lock).FlashOnly()
+
+/** Macro to see if someone else is waiting for the lock, enabling preemption
+	then disabling it again.
+
+@publishedPartner
+@prototype
+*/
+#define __SPIN_FLASH_PREEMPT(lock)				(lock).FlashPreempt()
+
+
+/** Read/Write Spin lock
+
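+	Multiple readers may hold the lock concurrently; a writer requires
+	exclusive access. An illustrative sketch (the lock name, order value and
+	shared data are assumptions, not part of this header):
+
+	@code
+	static TRWSpinLock StateLock(TSpinLock::EOrderGenericIrqLow1);	// shares TSpinLock's order space
+	static TUint SharedState;
+
+	TUint ReadState()
+		{
+		TInt irq = __SPIN_LOCK_IRQSAVE_R(StateLock);	// readers may run concurrently
+		TUint v = SharedState;
+		__SPIN_UNLOCK_IRQRESTORE_R(StateLock, irq);
+		return v;
+		}
+
+	void WriteState(TUint aValue)
+		{
+		TInt irq = __SPIN_LOCK_IRQSAVE_W(StateLock);	// writer excludes readers and writers
+		SharedState = aValue;
+		__SPIN_UNLOCK_IRQRESTORE_W(StateLock, irq);
+		}
+	@endcode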
+
+@publishedPartner
+@prototype
+*/
+class TRWSpinLock
+	{
+public:
+	IMPORT_C TRWSpinLock(TUint aOrder);		// Uses the same order space as TSpinLock
+
+	IMPORT_C void LockIrqR();				/**< @internalComponent disable interrupts and acquire read lock */
+	IMPORT_C void UnlockIrqR();				/**< @internalComponent release read lock and enable interrupts */
+	IMPORT_C TBool FlashIrqR();				/**< @internalComponent if someone else is waiting for the write lock, UnlockIrqR() then LockIrqR() */
+	IMPORT_C void LockIrqW();				/**< @internalComponent disable interrupts and acquire write lock */
+	IMPORT_C void UnlockIrqW();				/**< @internalComponent release write lock and enable interrupts */
+	IMPORT_C TBool FlashIrqW();				/**< @internalComponent if someone else is waiting for the lock, UnlockIrqW() then LockIrqW() */
+	IMPORT_C void LockOnlyR();				/**< @internalComponent acquire read lock, assuming interrupts/preemption already disabled */
+	IMPORT_C void UnlockOnlyR();			/**< @internalComponent release read lock, don't change interrupt/preemption state */
+	IMPORT_C TBool FlashOnlyR();			/**< @internalComponent if someone else is waiting for the write lock, UnlockOnlyR() then LockOnlyR() */
+	IMPORT_C void LockOnlyW();				/**< @internalComponent acquire write lock, assuming interrupts/preemption already disabled */
+	IMPORT_C void UnlockOnlyW();			/**< @internalComponent release write lock, don't change interrupt/preemption state */
+	IMPORT_C TBool FlashOnlyW();			/**< @internalComponent if someone else is waiting for the lock, UnlockOnlyW() then LockOnlyW() */
+	IMPORT_C TInt LockIrqSaveR();			/**< @internalComponent disable interrupts and acquire read lock, return original interrupt state */
+	IMPORT_C void UnlockIrqRestoreR(TInt);	/**< @internalComponent release read lock and restore original interrupt state */
+	IMPORT_C TBool FlashIrqRestoreR(TInt);	/**< @internalComponent if someone else is waiting for the write lock, UnlockIrqRestoreR() then LockIrqR() */
+	IMPORT_C TInt LockIrqSaveW();			/**< @internalComponent disable interrupts and acquire write lock, return original interrupt state */
+	IMPORT_C void UnlockIrqRestoreW(TInt);	/**< @internalComponent release write lock and restore original interrupt state */
+	IMPORT_C TBool FlashIrqRestoreW(TInt);	/**< @internalComponent if someone else is waiting for the lock, UnlockIrqRestoreW() then LockIrqW() */
+	IMPORT_C TBool FlashPreemptR();			/**< @internalComponent if someone else is waiting for the write lock, UnlockOnlyR(); NKern::PreemptionPoint(); LockOnlyR(); */
+	IMPORT_C TBool FlashPreemptW();			/**< @internalComponent if someone else is waiting for the lock, UnlockOnlyW(); NKern::PreemptionPoint(); LockOnlyW(); */
+private:
+	volatile TUint64 iLock;
+	};
+
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_LOCK_IRQ_R(lock)					(lock).LockIrqR()
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_UNLOCK_IRQ_R(lock)				(lock).UnlockIrqR()
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_FLASH_IRQ_R(lock)				((lock).FlashIrqR())
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_LOCK_IRQ_W(lock)					(lock).LockIrqW()
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_UNLOCK_IRQ_W(lock)				(lock).UnlockIrqW()
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_FLASH_IRQ_W(lock)				((lock).FlashIrqW())
+
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_LOCK_R(lock)						(lock).LockOnlyR()
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_UNLOCK_R(lock)					(lock).UnlockOnlyR()
+
+/**
+@internalComponent
+*/
+#define __SPIN_FLASH_R(lock)					((lock).FlashOnlyR())
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_LOCK_W(lock)						(lock).LockOnlyW()
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_UNLOCK_W(lock)					(lock).UnlockOnlyW()
+
+/**
+@internalComponent
+*/
+#define __SPIN_FLASH_W(lock)					((lock).FlashOnlyW())
+
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_LOCK_IRQSAVE_R(lock)				(lock).LockIrqSaveR()
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_UNLOCK_IRQRESTORE_R(lock,irq)	(lock).UnlockIrqRestoreR(irq)
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_FLASH_IRQRESTORE_R(lock,irq)		((lock).FlashIrqRestoreR(irq))
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_LOCK_IRQSAVE_W(lock)				(lock).LockIrqSaveW()
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_UNLOCK_IRQRESTORE_W(lock,irq)	(lock).UnlockIrqRestoreW(irq)
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_FLASH_IRQRESTORE_W(lock,irq)		((lock).FlashIrqRestoreW(irq))
+
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_FLASH_PREEMPT_R(lock)			((lock).FlashPreemptR())
+
+/**
+@publishedPartner
+@prototype
+*/
+#define __SPIN_FLASH_PREEMPT_W(lock)			((lock).FlashPreemptW())
+
+
+#ifdef _DEBUG
+#define __INCLUDE_SPIN_LOCK_CHECKS__
+#endif
+
+
+/** Nanokernel fast semaphore
+
+	A light-weight semaphore class that only supports a single waiting thread,
+	suitable for the Symbian OS thread I/O semaphore.
+
+	Initialising an NFastSemaphore involves two steps:
+
+	- Constructing the semaphore
+	- Setting the semaphore owning thread (the one allowed to wait on it)
+
+	For example, creating one for the current thread to wait on:
+
+	@code
+	NFastSemaphore sem;
+	sem.iOwningThread = NKern::CurrentThread();
+	@endcode
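+
+	Once initialised, the owning thread waits and another context signals. An
+	illustrative continuation of the example above (the surrounding driver
+	code is an assumption):
+
+	@code
+	// In the owning thread, wait for the I/O to complete:
+	sem.Wait();
+
+	// Elsewhere (e.g. in a DFC), signal completion:
+	sem.Signal();
+	@endcode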
+
+	@publishedPartner
+	@prototype
+*/
+class NFastSemaphore
+	{
+public:
+	inline NFastSemaphore();
+	inline NFastSemaphore(NThreadBase* aThread);
+	IMPORT_C void SetOwner(NThreadBase* aThread);
+	IMPORT_C void Wait();
+	IMPORT_C void Signal();
+	IMPORT_C void SignalN(TInt aCount);
+	IMPORT_C void Reset();
+	void WaitCancel();
+
+	TInt Dec(NThreadBase* aThread);			// does mb() if >0
+	NThreadBase* Inc(TInt aCount);			// does mb()
+	NThreadBase* DoReset();					// does mb()
+public:
+	/** If >=0, the semaphore count.
+		If <0, (thread>>2)|0x80000000.
+		@internalComponent
+	*/
+	TInt iCount;
+
+	/** The thread allowed to wait on the semaphore.
+		@internalComponent
+	*/
+	NThreadBase* iOwningThread;
+	};
+
+/** Create a fast semaphore.
+
+	@publishedPartner
+	@prototype
+*/
+inline NFastSemaphore::NFastSemaphore()
+	:	iCount(0), iOwningThread(NULL)
+	{}
+
+/** Nanokernel fast mutex
+
+	A light-weight priority-inheritance mutex that can be used if the following
+	conditions apply:
+
+	- Threads that hold the mutex never block.
+	- The mutex is never acquired in a nested fashion.
+
+	If either of these conditions is not met, a DMutex object is more appropriate.
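+
+	An illustrative sketch (the mutex name and the work done under it are
+	assumptions, not part of this header):
+
+	@code
+	static NFastMutex DataLock;
+
+	void DoProtectedWork()
+		{
+		DataLock.Wait();		// acquire; must not already be held by this thread
+		// ... short critical section that never blocks ...
+		DataLock.Signal();		// release
+		}
+	@endcode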
+
+	@publishedPartner
+	@prototype
+*/
+class NFastMutex
+	{
+public:
+	IMPORT_C NFastMutex();
+	IMPORT_C void Wait();
+	IMPORT_C void Signal();
+	IMPORT_C TBool HeldByCurrentThread();
+private:
+	void DoWaitL();
+	void DoSignalL();
+
+	friend class NKern;
+public:
+	/** @internalComponent
+
+	If the mutex is free and no-one is waiting, iHoldingThread=0.
+	If the mutex is held and no-one is waiting, iHoldingThread points to the holding thread.
+	If the mutex is free but threads are waiting, iHoldingThread=1.
+	If the mutex is held and threads are waiting, iHoldingThread points to the holding thread but with bit 0 set.
+	*/
+	NThreadBase* iHoldingThread;
+
+	TUint32 i_NFastMutex_Pad1;	/**< @internalComponent */
+
+	/** @internalComponent
+
+	Spin lock to protect the mutex.
+	*/
+	TSpinLock iMutexLock;
+
+	/** @internalComponent
+
+	List of NThreads which are waiting for the mutex. The threads are linked via
+	their iWaitLink members.
+	*/
+	TPriList<NThreadBase, KNumPriorities> iWaitQ;
+	};
+
+__ASSERT_COMPILE(!(_FOFF(NFastMutex,iMutexLock)&7));
+
+
+/**
+@publishedPartner
+@prototype
+
+The type of the callback function used by the nanokernel timer.
+
+@see NTimer
+*/
+typedef NEventFn NTimerFn;
+
+
+
+
+/**
+@publishedPartner
+@prototype
+
+A basic relative timer provided by the nanokernel.
+
+It can generate either a one-shot interrupt or periodic interrupts.
+
+A timeout handler is called when the timer expires, either:
+- from the timer ISR - if the timer is queued via OneShot(TInt aTime) or OneShot(TInt aTime, TBool EFalse), or
+- from the nanokernel timer dfc1 thread - if the timer is queued via a OneShot(TInt aTime, TBool ETrue) call, or
+- from the dfc thread to which the provided DFC belongs - if the timer is queued via a OneShot(TInt aTime, TDfc& aDfc) call.
+The callback mechanism cannot be changed during the lifetime of a timer.
+
+These timer objects may be manipulated from any context.
+The timers are driven from a periodic system tick interrupt,
+usually with a 1ms period.
+
+@see NTimerFn
+*/
+class NTimerQ;
+class NTimer : public NEventHandler
+	{
+public:
+	/**
+	Default constructor.
+	*/
+	inline NTimer()
+		{
+		iHType = EEventHandlerNTimer;
+		i8888.iHState1 = EIdle;
+		}
+	/**
+	Constructor taking a callback function and a pointer to be passed
+	to the callback function.
+
+	@param aFunction The callback function.
+	@param aPtr A pointer to be passed to the callback function
+	when called.
+	*/
+	inline NTimer(NTimerFn aFunction, TAny* aPtr)
+		{
+		iPtr = aPtr;
+		iFn = aFunction;
+		iHType = EEventHandlerNTimer;
+		i8888.iHState1 = EIdle;
+		}
+	IMPORT_C NTimer(NSchedulable* aTied, NTimerFn aFunction, TAny* aPtr);
+	IMPORT_C NTimer(TDfcFn aFunction, TAny* aPtr, TInt aPriority);	// create DFC, queue to be set later
+	IMPORT_C NTimer(TDfcFn aFunction, TAny* aPtr, TDfcQue* aDfcQ, TInt aPriority);	// create DFC
+	IMPORT_C void SetDfcQ(TDfcQue* aDfcQ);
+	IMPORT_C ~NTimer();
+	IMPORT_C TInt SetTied(NSchedulable* aTied);
+	IMPORT_C TInt OneShot(TInt aTime);
+	IMPORT_C TInt OneShot(TInt aTime, TBool aDfc);
+	IMPORT_C TInt OneShot(TInt aTime, TDfc& aDfc);
+	IMPORT_C TInt Again(TInt aTime);
+	IMPORT_C TBool Cancel();
+	IMPORT_C TBool IsPending();
+private:
+	enum { ECancelDestroy=1 };
+private:
+	inline TBool IsNormal()
+		{ return iHType==EEventHandlerNTimer; }
+	inline TBool IsMutating()
+		{ return iHType
+#endif
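+
+/** Illustrative NTimer usage (a sketch only; the handler name, context
+	pointer and tick count are assumptions, not part of this header):
+
+	@code
+	void TimerExpired(TAny* aPtr);				// NTimerFn handler; with plain OneShot() it runs in the timer ISR
+
+	NTimer timer(&TimerExpired, &myContext);	// callback-function form of the constructor
+	timer.OneShot(10);							// expire after 10 system ticks (usually 1ms each)
+	// ...
+	timer.Cancel();								// cancel if still pending
+	@endcode
+*/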