kernel/eka/nkernsmp/nkern.cpp
// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\nkern.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#include "nk_priv.h"

/******************************************************************************
 * Fast mutex
 ******************************************************************************/

/** Acquires the fast mutex.

    This will block until the mutex is available, and causes
	the thread to enter an implicit critical section until the mutex is released.

	Generally threads would use NKern::FMWait() which manipulates the kernel lock
	for you.

	@pre Kernel must be locked, with lock count 1.
	@pre The calling thread holds no fast mutexes.

	@post Kernel is locked, with lock count 1.
	@post The calling thread holds the mutex.

	@see NFastMutex::Signal()
	@see NKern::FMWait()
*/
EXPORT_C void NFastMutex::Wait()
	{
	NThreadBase* pC = NCurrentThreadL();
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NO_FAST_MUTEX,"NFastMutex::Wait");

	pC->iHeldFastMutex = this;		// to handle kill/suspend between here and actually acquiring the mutex
	DoWaitL();
	}

void NFastMutex::DoWaitL()
	{
	NThreadBase* pC = NCurrentThreadL();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T FMWait %M",pC,this));
	TBool waited = FALSE;			// set if we actually had to wait
	iMutexLock.LockOnly();			// acquire mutex spin lock
	__e32_atomic_ior_rlx_ptr(&iHoldingThread, 1);		// set contention flag to make sure any other thread must acquire the mutex spin lock
	pC->AcqSLock();
	FOREVER
		{
		if (pC->iFastMutexDefer == 1)
			--pC->iParent->iFreezeCpu;
		pC->iFastMutexDefer = 0;
		NThreadBase* pH = (NThreadBase*)(TLinAddr(iHoldingThread) &~ 1);
		if (!pH)
			{
			// mutex is free
			TInt wp = iWaitQ.HighestPriority();		// -1 if no other thread wants the mutex

			// don't grab mutex if we have been suspended/killed/migrated by the previous holding thread
			if (!pC->iSuspended && pC->iCsFunction!=NThreadBase::ECSDivertPending && (!pC->iParent->iCpuChange || pC->iParent->iFreezeCpu))
				{
				TInt p = pC->iPriority;
				if (p>wp || (p==wp && waited))
					{
					// if we are highest priority waiting thread or equal and we have waited then grab the mutex
					// don't just grab it if we are equal priority and someone else was already waiting
					// set contention flag if other threads waiting or if current thread has a round robin outstanding
					pC->iMutexPri = (TUint8)(wp>=0 ? wp : 0);	// pC's actual priority doesn't change since p>=wp
					iHoldingThread = (wp>=0 || TUint32(pC->iTime)==0x80000000u) ? (NThreadBase*)(TLinAddr(pC)|1) : pC;
					__KTRACE_OPT(KNKERN,DEBUGPRINT("%T got mutex %M CF=%d WP=%d",TLinAddr(iHoldingThread)&~1,this,TLinAddr(iHoldingThread)&1,wp));
					pC->RelSLock();
					iMutexLock.UnlockOnly();
#ifdef BTRACE_FAST_MUTEX
					BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexWait, this);
#endif
					return;
					}
				}
			}
		pC->iFastMutexDefer = 2;	// signal to scheduler to allow ctxsw without incrementing iParent->iFreezeCpu
		if (!pC->iSuspended && pC->iCsFunction!=NThreadBase::ECSDivertPending && (!pC->iParent->iCpuChange || pC->iParent->iFreezeCpu))
			{
			// this forces priority changes to wait for the mutex lock
			pC->iLinkedObjType = NThreadBase::EWaitFastMutex;
			pC->iLinkedObj = this;
			pC->iWaitState.SetUpWait(NThreadBase::EWaitFastMutex, NThreadWaitState::EWtStObstructed, this);
			pC->iWaitLink.iPriority = pC->iPriority;
			iWaitQ.Add(&pC->iWaitLink);
			pC->RelSLock();
			if (pH)
				pH->SetMutexPriority(this);
do_pause:
			iMutexLock.UnlockOnly();
			RescheduleNeeded();
#ifdef BTRACE_FAST_MUTEX
			BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexBlock, this);
#endif
			NKern::PreemptionPoint();	// we block here until the mutex is released and we are 'nominated' for it or we are suspended/killed
			iMutexLock.LockOnly();
			pC->AcqSLock();
			if (pC->iPauseCount || pC->iSuspended || pC->iCsFunction==NThreadBase::ECSDivertPending || (pC->iParent->iCpuChange && !pC->iParent->iFreezeCpu))
				{
				pC->RelSLock();
				goto do_pause;			// let pause/suspend/kill take effect
				}
			// if thread was suspended it will have been removed from the wait queue
			if (!pC->iLinkedObj)
				goto thread_suspended;
			iWaitQ.Remove(&pC->iWaitLink);	// take ourselves off the wait/contend queue while we try to grab the mutex
			pC->iWaitLink.iNext = 0;
			pC->iLinkedObj = 0;
			pC->iLinkedObjType = NThreadBase::EWaitNone;
			waited = TRUE;
			// if we are suspended or killed, we loop round again and do the 'else' clause next time
			}
		else
			{
			pC->RelSLock();
			if (pC->iSuspended || pC->iCsFunction==NThreadBase::ECSDivertPending)
				{
				// wake up next thread to take this one's place
				if (!pH && !iWaitQ.IsEmpty())
					{
					NThreadBase* pT = _LOFF(iWaitQ.First(), NThreadBase, iWaitLink);
					pT->AcqSLock();
					// if thread is still blocked on this fast mutex, release it but leave it on the wait queue
					// NOTE: it can't be suspended
					pT->iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, this, KErrNone);
					pT->RelSLock();
					}
				}
			iMutexLock.UnlockOnly();
			NKern::PreemptionPoint();	// thread suspends/dies/migrates here
			iMutexLock.LockOnly();
			pC->AcqSLock();
thread_suspended:
			waited = FALSE;
			// set contention flag to make sure any other thread must acquire the mutex spin lock
			// need to do it again since mutex may have been released while thread was suspended
			__e32_atomic_ior_rlx_ptr(&iHoldingThread, 1);
			}
		}
	}


#ifndef __FAST_MUTEX_MACHINE_CODED__
/** Releases a previously acquired fast mutex.

	Generally, threads would use NKern::FMSignal() which manipulates the kernel lock
	for you.

	@pre The calling thread holds the mutex.
	@pre Kernel must be locked.

	@post Kernel is locked.

	@see NFastMutex::Wait()
	@see NKern::FMSignal()
*/
EXPORT_C void NFastMutex::Signal()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NFastMutex::Signal");
#ifdef BTRACE_FAST_MUTEX
	BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexSignal, this);
#endif
	NThreadBase* pC = NCurrentThreadL();
	((volatile TUint32&)pC->iHeldFastMutex) |= 1;	// flag to indicate about to release mutex

	if (__e32_atomic_cas_rel_ptr(&iHoldingThread, &pC, 0))
		{
		// tricky if suspend/kill here
		// suspend/kill should check flag set above and aMutex->iHoldingThread
		// if bit 0 of iHeldFastMutex set and iHoldingThread==pC then set iHeldFastMutex=0 and proceed

		// no-one else was waiting for the mutex - simple
		pC->iHeldFastMutex = 0;
		return;
		}

	// there was contention so do it the hard way
	DoSignalL();
	}
#endif

void NFastMutex::DoSignalL()
	{
	NThreadBase* pC = NCurrentThreadL();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T FMSignal %M",pC,this));
	__ASSERT_WITH_MESSAGE_DEBUG(HeldByCurrentThread(),"The calling thread holds the mutex","NFastMutex::Signal");

	iMutexLock.LockOnly();
	if (!iWaitQ.IsEmpty())
		{
		NThreadBase* pT = _LOFF(iWaitQ.First(), NThreadBase, iWaitLink);
		pT->AcqSLock();

		// if thread is still blocked on this fast mutex, release it but leave it on the wait queue
		// NOTE: it can't be suspended
		pT->iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, this, KErrNone);
		pT->RelSLock();
		iHoldingThread = (NThreadBase*)1;	// mark mutex as released but contended
		}
	else
		iHoldingThread = 0;	// mark mutex as released and uncontended
	__KTRACE_OPT(KNKERN,DEBUGPRINT("SiHT=%d",iHoldingThread));
	pC->AcqSLock();
	pC->iHeldFastMutex = 0;
	iMutexLock.UnlockOnly();
	pC->iMutexPri = 0;
	if (pC->iPriority != pC->iBasePri)
		{
		// lose any inherited priority
		pC->LoseInheritedPriorityT();
		}
	if (TUint32(pC->iTime)==0x80000000u)
		{
		pC->iTime = 0;
		RescheduleNeeded();	// handle deferred timeslicing
		__KTRACE_OPT(KNKERN,DEBUGPRINT("DTS %T",pC));
		}
	if (pC->iFastMutexDefer)
		{
		pC->iFastMutexDefer = 0;
		--pC->iParent->iFreezeCpu;
		}
	if (pC->iParent->iCpuChange && !pC->iParent->iFreezeCpu)
		RescheduleNeeded();	// need to migrate to another CPU
	if (!pC->iCsCount && pC->iCsFunction)
		pC->DoCsFunctionT();
	pC->RelSLock();
	}


/** Checks whether the current thread holds this fast mutex.

	@return TRUE if the current thread holds this fast mutex
	@return FALSE if not
	@pre	Call in thread context.
*/
EXPORT_C TBool NFastMutex::HeldByCurrentThread()
	{
	return (TLinAddr(iHoldingThread)&~1) == (TLinAddr)NKern::CurrentThread();
	}


/** Returns the fast mutex held by the calling thread, if any.

	@return	If the calling thread currently holds a fast mutex, this function
			returns a pointer to it; otherwise it returns NULL.
	@pre	Call in thread context.
*/
EXPORT_C NFastMutex* NKern::HeldFastMutex()
	{
	NThreadBase* t = NKern::CurrentThread();
	NFastMutex* m = (NFastMutex*)(TLinAddr(t->iHeldFastMutex)&~3);
	return (m && m->HeldByCurrentThread()) ? m : 0;
	}
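
/* Usage sketch (illustrative addition, not part of the original source):
   a debug-build ownership check before touching state protected by a fast
   mutex. 'MyLock' and 'MyOp' are hypothetical names.

   @code
   static NFastMutex MyLock;

   void MyOp()
	   {
	   // caller must already hold MyLock, and a thread holds at most one fast mutex
	   __NK_ASSERT_DEBUG(MyLock.HeldByCurrentThread());
	   __NK_ASSERT_DEBUG(NKern::HeldFastMutex() == &MyLock);
	   // ... manipulate the protected state ...
	   }
   @endcode
*/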
       


#ifndef __FAST_MUTEX_MACHINE_CODED__
/** Acquires a fast mutex.

    This will block until the mutex is available, and causes
	the thread to enter an implicit critical section until the mutex is released.

	@param aMutex The fast mutex to acquire.

	@post The calling thread holds the mutex.

	@see NFastMutex::Wait()
	@see NKern::FMSignal()

	@pre No fast mutex can be held.
	@pre Call in a thread context.
	@pre Kernel must be unlocked.
	@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::FMWait(NFastMutex* aMutex)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NFMW %M", aMutex));
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::FMWait");
	NThreadBase* pC = NKern::CurrentThread();

	// If the reschedule IPI from an external suspend or kill occurs after this
	// point the initiating CPU must observe the write to iHeldFastMutex before
	// the cas operation.
	pC->iHeldFastMutex = aMutex;	// kill/suspend after this point should set mutex contention flag
	NThreadBase* expect = 0;
	if (__e32_atomic_cas_acq_ptr(&aMutex->iHoldingThread, &expect, pC))
		{
		// mutex was free and we have just claimed it - simple
#ifdef BTRACE_FAST_MUTEX
		BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexWait, aMutex);
#endif
		return;
		}

	// care required if suspend/kill here

	// there is contention so do it the hard way
	NKern::Lock();
	aMutex->DoWaitL();
	NKern::Unlock();
	}


/** Releases a previously acquired fast mutex.

	@param aMutex The fast mutex to release.

	@pre The calling thread holds the mutex.

	@see NFastMutex::Signal()
	@see NKern::FMWait()
*/
EXPORT_C void NKern::FMSignal(NFastMutex* aMutex)
	{
	NThreadBase* pC = NKern::CurrentThread();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NFMS %M", aMutex));
#ifdef BTRACE_FAST_MUTEX
	BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexSignal, aMutex);
#endif
	((volatile TUint32&)pC->iHeldFastMutex) |= 1;	// flag to indicate about to release mutex

	if (__e32_atomic_cas_rel_ptr(&aMutex->iHoldingThread, &pC, 0))
		{
		// no-one else was waiting for the mutex and we have just released it

		// tricky if suspend/kill here
		// suspend/kill should check flag set above and aMutex->iHoldingThread
		// if bit 0 of iHeldFastMutex set and iHoldingThread==pC then set iHeldFastMutex=0 and proceed

		// If the reschedule IPI from an external suspend or kill occurs after this
		// point the initiating CPU must observe the write to iHeldFastMutex after
		// the cas operation.
		pC->iHeldFastMutex = 0;
		return;
		}

	// there was contention so do it the hard way
	NKern::Lock();
	aMutex->DoSignalL();
	NKern::Unlock();
	}
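
/* Usage sketch (illustrative addition, not part of the original source):
   the standard pattern for guarding a short critical region with a fast
   mutex from thread context. 'TheLock' and 'TheCount' are hypothetical.

   @code
   static NFastMutex TheLock;
   static TInt TheCount;

   void BumpCount()
	   {
	   NKern::FMWait(&TheLock);		// blocks until free; enters implicit critical section
	   ++TheCount;					// keep the held region short and non-blocking
	   NKern::FMSignal(&TheLock);	// any deferred suspend/kill takes effect here
	   }
   @endcode
*/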
       

/** Acquires the System Lock.

    This will block until the lock is available, and causes
	the thread to enter an implicit critical section until the lock is released.

	@post System lock is held.

	@see NKern::UnlockSystem()
	@see NKern::FMWait()

	@pre No fast mutex can be held.
	@pre Call in a thread context.
	@pre Kernel must be unlocked.
	@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::LockSystem()
	{
	NKern::FMWait(&TheScheduler.iLock);
	}


/** Releases the System Lock.

	@pre System lock must be held.

	@see NKern::LockSystem()
	@see NKern::FMSignal()
*/
EXPORT_C void NKern::UnlockSystem()
	{
	NKern::FMSignal(&TheScheduler.iLock);
	}


/** Temporarily releases a fast mutex if there is contention.

    If there is another thread attempting to acquire the mutex, the calling
	thread releases the mutex and then acquires it again.

	This is more efficient than the equivalent code:

	@code
	NKern::FMSignal();
	NKern::FMWait();
	@endcode

	@return	TRUE if the mutex was relinquished, FALSE if not.

	@pre	The mutex must be held.

	@post	The mutex is held.
*/
EXPORT_C TBool NKern::FMFlash(NFastMutex* aM)
	{
	NThreadBase* pC = NKern::CurrentThread();
	__ASSERT_WITH_MESSAGE_DEBUG(aM->HeldByCurrentThread(),"The calling thread holds the mutex","NKern::FMFlash");
	TBool w = (pC->iMutexPri >= pC->iBasePri);	// a thread of greater or equal priority is waiting
	if (w)
		{
		NKern::Lock();
		aM->Signal();
		NKern::PreemptionPoint();
		aM->Wait();
		NKern::Unlock();
		}
#ifdef BTRACE_FAST_MUTEX
	else
		{
		BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexFlash, aM);
		}
#endif
	return w;
	}
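
/* Usage sketch (illustrative addition): a long-running operation can flash
   the mutex once per iteration so an equal or higher priority waiter gets a
   turn. 'ProcessEntry' and the queue parameter are hypothetical.

   @code
   void ProcessAll(SDblQue& aQ, NFastMutex& aLock)
	   {
	   NKern::FMWait(&aLock);
	   while (!aQ.IsEmpty())
		   {
		   ProcessEntry(aQ.First());	// one bounded unit of work under the lock
		   NKern::FMFlash(&aLock);		// brief release only if someone is waiting
		   }
	   NKern::FMSignal(&aLock);
	   }
   @endcode
*/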
       


/** Temporarily releases the System Lock if there is contention.

    If there is another thread attempting to acquire the System Lock, the
	calling thread releases it and then acquires it again.

	This is more efficient than the equivalent code:

	@code
	NKern::UnlockSystem();
	NKern::LockSystem();
	@endcode

	Note that this can only allow higher priority threads to use the System
	Lock, as a lower priority thread cannot cause contention on a fast mutex.

	@return	TRUE if the System Lock was relinquished, FALSE if not.

	@pre	System lock must be held.

	@post	System lock is held.

	@see NKern::LockSystem()
	@see NKern::UnlockSystem()
*/
EXPORT_C TBool NKern::FlashSystem()
	{
	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"NKern::FlashSystem");
	return NKern::FMFlash(&TheScheduler.iLock);
	}
#endif
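
/* Usage sketch (illustrative addition): the same flashing pattern applied
   to the System Lock; 'aSteps' is hypothetical.

   @code
   NKern::LockSystem();
   for (TInt i=0; i<aSteps; ++i)
	   {
	   // ... one bounded unit of work under the System Lock ...
	   NKern::FlashSystem();	// lets higher priority contenders in, if any
	   }
   NKern::UnlockSystem();
   @endcode
*/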
       

/******************************************************************************
 * Fast semaphore
 ******************************************************************************/

/** Sets the owner of a fast semaphore.

	@param aThread The thread to own this semaphore. If aThread==0, then the
					owner is set to the current thread.

	@pre Kernel must be locked.
	@pre If changing ownership from one thread to another, there must be no
		 pending signals or waits.
	@pre Call either in a thread or an IDFC context.

	@post Kernel is locked.
*/
EXPORT_C void NFastSemaphore::SetOwner(NThreadBase* aThread)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SetOwner");
	if (!aThread)
		aThread = NCurrentThreadL();
	if (iOwningThread && iOwningThread!=aThread)
		{
		__NK_ASSERT_ALWAYS(!iCount);	// Can't change owner if iCount!=0
		}
	iOwningThread = aThread;
	}

#ifndef __FAST_SEM_MACHINE_CODED__
/** Waits on a fast semaphore.

    Decrements the signal count for the semaphore and
	removes the calling thread from the ready-list if the semaphore becomes
	unsignalled. Only the thread that owns a fast semaphore can wait on it.

	Note that this function does not block, it merely updates the NThread state;
	rescheduling will only occur when the kernel is unlocked. Generally threads
	would use NKern::FSWait() which manipulates the kernel lock for you.

	@pre The calling thread must own the semaphore.
	@pre No fast mutex can be held.
	@pre Kernel must be locked.

	@post Kernel is locked.

	@see NFastSemaphore::Signal()
	@see NKern::FSWait()
	@see NKern::Unlock()
 */
EXPORT_C void NFastSemaphore::Wait()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NO_FAST_MUTEX,"NFastSemaphore::Wait");
	NThreadBase* pC = NCurrentThreadL();
	__ASSERT_WITH_MESSAGE_ALWAYS(pC==iOwningThread,"The calling thread must own the semaphore","NFastSemaphore::Wait");
	pC->iWaitState.SetUpWait(NThreadBase::EWaitFastSemaphore, 0, this);
	if (Dec(pC))						// full barrier
		pC->iWaitState.CancelWait();	// don't have to wait
	else
		RescheduleNeeded();				// have to wait
	}


/** Signals a fast semaphore.

    Increments the signal count of a fast semaphore by
	one and releases any waiting thread if the semaphore becomes signalled.

	Note that a reschedule will not occur before this function returns; it will
	only take place when the kernel is unlocked. Generally threads
	would use NKern::FSSignal() which manipulates the kernel lock for you.

	@pre Kernel must be locked.
	@pre Call either in a thread or an IDFC context.

	@post Kernel is locked.

	@see NFastSemaphore::Wait()
	@see NKern::FSSignal()
	@see NKern::Unlock()
 */
EXPORT_C void NFastSemaphore::Signal()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Signal");
	NThreadBase* t = Inc(1);	// release semantics
	if (t)
		{
		t->AcqSLock();
		t->iWaitState.UnBlockT(NThreadBase::EWaitFastSemaphore, this, KErrNone);
		t->RelSLock();
		}
	}


/** Signals a fast semaphore multiple times.

	@pre Kernel must be locked.
	@pre Call either in a thread or an IDFC context.

	@post Kernel is locked.

	@internalComponent
 */
EXPORT_C void NFastSemaphore::SignalN(TInt aCount)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SignalN");
	__NK_ASSERT_DEBUG(aCount>=0);
	if (aCount > 0)
		{
		NThreadBase* t = Inc(aCount);
		if (t)
			{
			t->AcqSLock();
			t->iWaitState.UnBlockT(NThreadBase::EWaitFastSemaphore, this, KErrNone);
			t->RelSLock();
			}
		}
	}


/** Cancels a wait on a fast semaphore.

	@pre Kernel must be locked.
	@pre Call either in a thread or an IDFC context.

	@post Kernel is locked.

	@internalComponent
 */
void NFastSemaphore::WaitCancel()
	{
	Inc(1);
	}


/** Waits for a signal on the current thread's I/O semaphore.

	@pre No fast mutex can be held.
	@pre Call in a thread context.
	@pre Kernel must be unlocked.
	@pre Interrupts must be enabled.
 */
EXPORT_C void NKern::WaitForAnyRequest()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::WaitForAnyRequest");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("WfAR"));
	NThreadBase* t = NKern::LockC();
	NFastSemaphore* s = &t->iRequestSemaphore;
	t->iWaitState.SetUpWait(NThreadBase::EWaitFastSemaphore, 0, s);
	if (s->Dec(t))					// fully ordered semantics
		t->iWaitState.CancelWait();	// don't have to wait
	else
		RescheduleNeeded();			// have to wait
	NKern::Unlock();
	}
#endif
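
/* Usage sketch (illustrative addition): NKern::WaitForAnyRequest() pairs
   with NKern::ThreadRequestSignal() (defined later in this file) in the
   usual request/completion pattern; the work queue itself is hypothetical.

   @code
   void ServerLoop()				// runs in the server thread
	   {
	   FOREVER
		   {
		   NKern::WaitForAnyRequest();	// sleep until work is signalled
		   // ... drain the work queue ...
		   }
	   }

   void PostWork(NThread* aServer)	// runs in any other thread
	   {
	   // ... queue an item for the server ...
	   NKern::ThreadRequestSignal(aServer);	// signal its I/O semaphore
	   }
   @endcode
*/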
       


/** Resets a fast semaphore.

	@pre Kernel must be locked.
	@pre Call either in a thread or an IDFC context.

	@post Kernel is locked.

	@internalComponent
 */
EXPORT_C void NFastSemaphore::Reset()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Reset");
	NThreadBase* t = DoReset();
	if (t)
		{
		t->AcqSLock();
		t->iWaitState.UnBlockT(NThreadBase::EWaitFastSemaphore, this, KErrNone);
		t->RelSLock();
		}
	}


/** Sets the owner of a fast semaphore.

	@param aSem The semaphore to change ownership of.
	@param aThread The thread to own this semaphore. If aThread==0, then the
					owner is set to the current thread.

	@pre If changing ownership from one thread to another, there must be no
		 pending signals or waits.
*/
EXPORT_C void NKern::FSSetOwner(NFastSemaphore* aSem,NThreadBase* aThread)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSetOwner %m %T",aSem,aThread));
	NKern::Lock();
	aSem->SetOwner(aThread);
	NKern::Unlock();
	}

#ifndef __FAST_SEM_MACHINE_CODED__
/** Waits on a fast semaphore.

    Decrements the signal count for the semaphore
	and waits for a signal if the semaphore becomes unsignalled. Only the
	thread that owns a fast semaphore can wait on it.

	@param aSem The semaphore to wait on.

	@pre The calling thread must own the semaphore.
	@pre No fast mutex can be held.

	@see NFastSemaphore::Wait()
*/
EXPORT_C void NKern::FSWait(NFastSemaphore* aSem)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NFSW %m",aSem));
	NKern::Lock();
	aSem->Wait();
	NKern::Unlock();
	}


/** Signals a fast semaphore.

    Increments the signal count of a fast semaphore
	by one and releases any waiting thread if the semaphore becomes signalled.

	@param aSem The semaphore to signal.

	@see NKern::FSWait()

	@pre Interrupts must be enabled.
	@pre Do not call from an ISR.
 */
EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignal(NFastSemaphore*)");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NFSS %m",aSem));
	NKern::Lock();
	aSem->Signal();
	NKern::Unlock();
	}
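
/* Usage sketch (illustrative addition): a fast semaphore used as an event
   counter. Only the owning thread (see NKern::FSSetOwner() above) may wait;
   any thread may signal. Names are hypothetical.

   @code
   static NFastSemaphore EventSem;	// owner set to the consumer thread elsewhere

   void Consumer()					// must be EventSem's owning thread
	   {
	   FOREVER
		   {
		   NKern::FSWait(&EventSem);	// consume one signal, blocking if none pending
		   // ... handle one event ...
		   }
	   }

   void Producer()
	   {
	   NKern::FSSignal(&EventSem);	// post one event
	   }
   @endcode
*/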
       


/** Signals a fast semaphore multiple times.

    Increments the signal count of a
	fast semaphore by aCount and releases any waiting thread if the semaphore
	becomes signalled.

	@param aSem The semaphore to signal.
	@param aCount The number of times to signal the semaphore.

	@see NKern::FSWait()

	@pre Interrupts must be enabled.
	@pre Do not call from an ISR.
 */
EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignalN(NFastSemaphore*, TInt)");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NFSSN %m %d",aSem,aCount));
	__NK_ASSERT_DEBUG(aCount>=0);
	if (aCount == 0)
		return;
	NKern::Lock();
	aSem->SignalN(aCount);
	NKern::Unlock();
	}


/** Signals the request semaphore of a nanothread.

	This function is intended to be used by the EPOC layer and personality
	layers. Device drivers should use Kern::RequestComplete instead.

	@param aThread Nanothread to signal. Must be non-NULL.

	@see Kern::RequestComplete()

	@pre Interrupts must be enabled.
	@pre Do not call from an ISR.
 */
EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread)
	{
	NKern::FSSignal(&aThread->iRequestSemaphore);
	}


/** Signals the request semaphore of a nanothread several times.

	This function is intended to be used by the EPOC layer and personality
	layers. Device drivers should use Kern::RequestComplete instead.

	@param aThread Nanothread to signal. If NULL, the current thread is signalled.
	@param aCount Number of times the request semaphore must be signalled.

	@pre aCount >= 0

	@see Kern::RequestComplete()
 */
EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, TInt aCount)
	{
	__ASSERT_WITH_MESSAGE_DEBUG(aCount >= 0,"aCount >= 0","NKern::ThreadRequestSignal");
	if (!aThread)
		aThread = (NThread*)NKern::CurrentThread();
	NKern::FSSignalN(&aThread->iRequestSemaphore, aCount);
	}
#endif


/** Atomically signals a fast semaphore and releases a fast mutex.

	Rescheduling only occurs after both synchronisation operations are complete.

	@param aSem The semaphore to signal.
	@param aMutex The mutex to release. If NULL, the System Lock is released.

	@pre The calling thread must hold the mutex.

	@see NKern::FMSignal()
 */
EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem, NFastMutex* aMutex)
	{
	if (!aMutex)
		aMutex=&TheScheduler.iLock;
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NFSS %m +FM %M",aSem,aMutex));
	NKern::Lock();
	aSem->Signal();
	aMutex->Signal();
	NKern::Unlock();
	}
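
/* Usage sketch (illustrative addition): signalling a worker and dropping
   the System Lock in one step, so no reschedule can occur between the two
   operations. 'WorkerSem' and the request state are hypothetical.

   @code
   NKern::LockSystem();			// protect the shared request state
   // ... fill in the request for the worker ...
   NKern::FSSignal(&WorkerSem, NULL);	// signal and release the System Lock together
   @endcode
*/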
       


/** Atomically signals a fast semaphore multiple times and releases a fast mutex.

	Rescheduling only occurs after both synchronisation operations are complete.

	@param aSem The semaphore to signal.
	@param aCount The number of times to signal the semaphore.
	@param aMutex The mutex to release. If NULL, the System Lock is released.

	@pre The calling thread must hold the mutex.

	@see NKern::FMSignal()
 */
EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount, NFastMutex* aMutex)
	{
	if (!aMutex)
		aMutex=&TheScheduler.iLock;
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NFSSN %m %d + FM %M",aSem,aCount,aMutex));
	NKern::Lock();
	aSem->SignalN(aCount);
	aMutex->Signal();
	NKern::Unlock();
	}


/******************************************************************************
 * Thread
 ******************************************************************************/

void NThreadBase::DoCsFunctionT()
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nDoCsFuncT %d",this,iCsFunction));
	TInt f=iCsFunction;
	if (f==0)
		return;
	if (f>0)
		{
		// suspend this thread f times
		iCsFunction = 0;
		iSuspendCount += f;
		iSuspended = 1;
		RescheduleNeeded();
		return;
		}
	if (f==ECSExitPending || f==ECSDivertPending)
		{
		// We need to exit now
		RelSLock();
		Exit();	// this won't return
		}
//	UnknownState(ELeaveCS,f);	// call into RTOS personality
	__NK_ASSERT_ALWAYS(0);
	}

TBool NThreadBase::DoSuspendOrKillT(TInt aCount, TSubScheduler* aS)
	{
	TBool result = TRUE;
	if (aCount>=0)
		{
		if (iSuspended)
			result = FALSE;
		iSuspendCount+=aCount;
		iSuspended = 1;
		if (!iCurrent)
			{
			if (aS)
				UnReadyT();
			else if (iReady)
				{
				NThreadGroup* g = (NThreadGroup*)iParent;
				g->iNThreadList.Remove(this);
				}
			}
		if (this == NCurrentThreadL())
			RescheduleNeeded();
		if (aS)
			aS->iReadyListLock.UnlockOnly();
		}
	else
		{
		iCsFunction = ECSDivertPending;
		iSuspendCount = 0;
		iSuspended = 0;
		if (aS)
			aS->iReadyListLock.UnlockOnly();
		DoReleaseT(KErrDied,0);
		if (!iReady && !iPauseCount)
			ReadyT(0);
		}
	return result;
	}


// If aCount>=0 suspend the thread aCount times
// If aCount<0 kill the thread
TBool NThreadBase::SuspendOrKill(TInt aCount)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSuspendOrKill %d", this, aCount));
	if (aCount==0)
		return FALSE;
	TBool result = FALSE;
	TBool concurrent = FALSE;
	TSubScheduler* ss = 0;
	AcqSLock();
	NFastMutex* wfm = 0;
	if (iLinkedObj && iLinkedObjType==EWaitFastMutex)
		wfm = (NFastMutex*)iLinkedObj;
	if (iCsFunction<0)
		goto done2;	// if already exiting ignore suspend or kill
	if (wfm)
		{
		// if thread is waiting on a fast mutex, need to acquire mutex lock
		++iPauseCount;
		RelSLock();
		wfm->iMutexLock.LockOnly();
		AcqSLock();
		UnPauseT();
		}
	if (iReady && iParent->iReady)
		{
		ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
		ss->iReadyListLock.LockOnly();
		}
	concurrent = (iCurrent && this!=NCurrentThreadL());
	if (iWaitState.ThreadIsDead())				// already dead so suspension/kill is a no-op
		goto done;
	if (concurrent)
		{
		// thread is actually running on another CPU
		// interrupt that CPU and wait for it to enter interrupt mode
		// this allows a snapshot of the thread state to be observed
		// in this state, the thread cannot enter or leave a critical section
		send_resched_ipi_and_wait(iLastCpu);
		}
	if (iCsCount)
		{
suspend_or_kill_in_cs:
		__KTRACE_OPT(KNKERN,DEBUGPRINT("n Suspend %T (CSF %d) %d",this,iCsFunction,aCount));
		if (aCount>0)				// -ve means thread is about to exit
			iCsFunction+=aCount;	// so thread will suspend itself when it leaves the critical section
		else
			iCsFunction = ECSExitPending;
		goto done;
		}
	// iCsCount==0 and it can't become nonzero until we release the thread spin lock
	// (since threads may not set iCsCount to a nonzero value with the kernel lock held)
	// Make sure the thread isn't actually about to exit by itself
	if (iCsFunction<0)
		goto done;	// if already exiting ignore suspend or kill
	if (wfm)
		{
		wfm->iWaitQ.Remove(&iWaitLink);	// take thread off the wait/contend queue
		iWaitLink.iNext = 0;
		iLinkedObj = 0;
		iLinkedObjType = EWaitNone;
		result = DoSuspendOrKillT(aCount, ss);
		if (aCount>0)
			DoReleaseT(KErrGeneral, 0);	// thread isn't blocked any more, just suspended
		RelSLock();

		// May need to adjust holding thread's inherited priority.
		// May need to wake up next thread to take this one's place.
		NThreadBase* pH = (NThreadBase*)(TLinAddr(wfm->iHoldingThread) &~ 1);
		if (pH)
			pH->SetMutexPriority(wfm);
		else if (!pH && !wfm->iWaitQ.IsEmpty())
			{
			NThreadBase* pT = _LOFF(wfm->iWaitQ.First(), NThreadBase, iWaitLink);
			pT->AcqSLock();
			pT->iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, wfm, KErrNone);
			pT->RelSLock();
			}
		wfm->iMutexLock.UnlockOnly();
		return result;
		}
	if (CheckFastMutexDefer())
		goto suspend_or_kill_in_cs;

	// thread not in critical section, so suspend it
	result = DoSuspendOrKillT(aCount, ss);
	goto done2;

done:
	if (wfm)
		wfm->iMutexLock.UnlockOnly();
	if (ss)
		ss->iReadyListLock.UnlockOnly();
done2:
	RelSLock();

	return result;
	}


/** Suspends a nanothread the specified number of times.

	For use by RTOS personality layers.
	Do not use this function directly on a Symbian OS thread.
	Since the kernel is locked on entry, any reschedule will be deferred until
	it is unlocked.
	The suspension will be deferred if the target thread is currently in a
	critical section; in this case the suspension will take effect when it exits
	the critical section.
	The thread's unknown state handler will be invoked with function ESuspend and
	parameter aCount if the current NState is not recognised and it is not in a
	critical section.

	@param	aCount The number of times to suspend.
	@return	TRUE, if the suspension has taken immediate effect;
			FALSE, if the thread is in a critical section or is already suspended.

	@pre	Kernel must be locked.
	@pre	Call in a thread context.

	@post	Kernel is locked.
 */
EXPORT_C TBool NThreadBase::Suspend(TInt aCount)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Suspend");
	__NK_ASSERT_ALWAYS(aCount>=0);

	// If thread is executing a critical section, we must defer the suspend

	return SuspendOrKill(aCount);
	}


TBool NThreadBase::Resume(TBool aForce)
	{
	TBool result = FALSE;
	AcqSLock();
	if (iWaitState.ThreadIsDead() || iCsFunction<0)		// already dead or dying so resume is a no-op
		goto done;

	if (iCsFunction>0)
		{
		if (aForce)
			iCsFunction = 0;
		else
			--iCsFunction;
		}
	else if (iSuspendCount)
		{
		if (aForce)
			iSuspendCount = 0;
		else
			--iSuspendCount;
		if (!iSuspendCount)
			{
			result = TRUE;
			iSuspended = 0;
			if (!iPauseCount && !iReady && !iWaitState.iWtC.iWtStFlags)
				ReadyT(0);
			}
		}

done:
	RelSLock();
	return result;
	}

/** Resumes a nanothread, cancelling one suspension.

	For use by RTOS personality layers.
	Do not use this function directly on a Symbian OS thread.
	Since the kernel is locked on entry, any reschedule will be deferred until
	it is unlocked.
	If the target thread is currently in a critical section this will simply
	cancel one deferred suspension.
	The thread's unknown state handler will be invoked with function EResume if
	the current NState is not recognised and it is not in a critical section.

	@return	TRUE, if the resumption has taken immediate effect;
			FALSE, if the thread is in a critical section or is still suspended.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.

	@post	Kernel must be locked.
 */
EXPORT_C TBool NThreadBase::Resume()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Resume");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nRsm",this));

	return Resume(FALSE);
	}


/** Resumes a nanothread, cancelling all outstanding suspensions.

	For use by RTOS personality layers.
	Do not use this function directly on a Symbian OS thread.
	Since the kernel is locked on entry, any reschedule will be deferred until
	it is unlocked.
	If the target thread is currently in a critical section this will simply
	cancel all deferred suspensions.
	The thread's unknown state handler will be invoked with function EForceResume
	if the current NState is not recognised and it is not in a critical section.

	@return	TRUE, if the resumption has taken immediate effect;
			FALSE, if the thread is in a critical section.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.

	@post	Kernel is locked.
 */
EXPORT_C TBool NThreadBase::ForceResume()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::ForceResume");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nFRsm",this));

	return Resume(TRUE);
	}
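
/* Usage sketch (illustrative addition): how an RTOS personality layer might
   pause and unpause one of its own nanothreads. The kernel must be locked
   around the calls; 'PauseTask'/'UnpauseTask' are hypothetical.

   @code
   void PauseTask(NThreadBase* aT)
	   {
	   NKern::Lock();
	   aT->Suspend(1);	// deferred if aT is in a critical section
	   NKern::Unlock();	// any resulting reschedule happens here
	   }

   void UnpauseTask(NThreadBase* aT)
	   {
	   NKern::Lock();
	   aT->Resume();	// cancels exactly one suspension
	   NKern::Unlock();
	   }
   @endcode
*/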
       


void NThreadBase::DoReleaseT(TInt aReturnCode, TUint aMode)
	{
	TAny* wobj = 0;
	TUint32 b = iWaitState.ReleaseT(wobj, aReturnCode);	// cancels timer if necessary

	// if wait pending or no wait, done
	// if wait in effect and nothing else stopping it, make thread ready
	// cancel any outstanding wait on fast semaphore if abnormal release
	// FIXME: Potential problems with abnormal release of generic wait objects
	if (aReturnCode<0 && ((b>>8)&0xff)==NThreadBase::EWaitFastSemaphore && wobj)
		((NFastSemaphore*)wobj)->WaitCancel();

	if ((b & NThreadWaitState::EWtStWaitActive) && !iPauseCount && !iSuspended)
		ReadyT(aMode);
	}

/** Releases a waiting nanokernel thread.

	For use by RTOS personality layers.
	Do not use this function directly on a Symbian OS thread.
	This function should make the thread ready (provided it is not explicitly
	suspended) and cancel any wait timeout. It should also remove it from any
	wait queues.
	If aReturnCode is nonnegative it indicates normal completion of the wait.
	If aReturnCode is negative it indicates early/abnormal completion of the
	wait and so any wait object should be reverted as if the wait had never
	occurred (e.g. a semaphore count should be incremented as this thread has
	not actually acquired the semaphore).
	The thread's unknown state handler will be invoked with function ERelease
	and parameter aReturnCode if the current NState is not recognised.

	@param aReturnCode	The reason code for release.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.

	@post	Kernel is locked.
 */
EXPORT_C void NThreadBase::Release(TInt aReturnCode, TUint aMode)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::Release");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nRel %d",this,aReturnCode));
	AcqSLock();
	DoReleaseT(aReturnCode, aMode);
	RelSLock();
	}
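
/* Usage sketch (illustrative addition): a personality layer completing a
   wait on one of its own wait objects after a timeout. The negative reason
   code marks an abnormal completion so the wait object is reverted.
   'MyTimeoutFn' is hypothetical.

   @code
   void MyTimeoutFn(NThreadBase* aT)
	   {
	   NKern::Lock();
	   aT->Release(KErrTimedOut, 0);	// ready the thread unless also suspended
	   NKern::Unlock();
	   }
   @endcode
*/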
       


/** Signals a nanokernel thread's request semaphore.

	This can also be used on Symbian OS threads.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.

	@post	Kernel is locked.
 */
EXPORT_C void NThreadBase::RequestSignal()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::RequestSignal");
	iRequestSemaphore.Signal();
	}


void exit_sync_fn(TAny* aDfc)
	{
	((TDfc*)aDfc)->Enque();
	}

void NThreadBase::Exit()
	{
	// The current thread is exiting
	// Enter with kernel locked, don't return
	__NK_ASSERT_DEBUG(this==NCurrentThreadL());

	OnExit();

	TInt threadCS = iCsCount;
	TInt kernCS = SubScheduler().iKernLockCount;
	iCsCount = 1;
	AcqSLock();
	iCsFunction = ECSExitInProgress;
	NFastMutex* m = NKern::HeldFastMutex();
	iHeldFastMutex = 0;
	RelSLock();
	NKern::Unlock();
	__KTRACE_OPT(KSCHED,DEBUGPRINT("Exit %T %u",this,NTickCount()));
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nExit, CSC %d HeldFM %M KernCS %d",this,threadCS,iHeldFastMutex,kernCS));
	if (kernCS!=1)
		FAULT();
	if (m)
		FAULT();
	if (threadCS)
		FAULT();
	TDfc* pD = NULL;
	NThreadExitHandler xh = iHandlers->iExitHandler;
	if (xh)
		pD = (*xh)((NThread*)this);		// call exit handler

	// detach any tied events
	DetachTiedEvents();

	NKern::LeaveGroup();	// detach from group if exit handler didn't do it

	NKern::Lock();
#ifdef BTRACE_THREAD_IDENTIFICATION
	BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadDestroy,this);
#endif
	__NK_ASSERT_ALWAYS(iCsFunction == ECSExitInProgress);
	iWaitState.SetDead(pD);	// doesn't return
	FAULT();
	}

/** Kills a nanokernel thread.

	For use by RTOS personality layers.
	Do not use this function directly on a Symbian OS thread.

	When acting on the calling thread, causes the calling thread to exit.

	When acting on another thread, causes that thread to exit unless it is
	currently in a critical section. In this case the thread is marked as
	"exit pending" and will exit as soon as it leaves the critical section.

	In either case the exiting thread first invokes its exit handler (if it
	exists). The handler runs with preemption enabled and with the thread in a
	critical section so that it may not be suspended or killed again. The
	handler may return a pointer to a TDfc, which will be enqueued just before
	the thread finally terminates (after the kernel has been relocked). This DFC
	will therefore execute once the NThread has been safely removed from the
	scheduler and is intended to be used to clean up the NThread object and any
	associated personality layer resources.

	@pre	Kernel must be locked.
	@pre	Call in a thread context.
	@pre	If acting on calling thread, calling thread must not be in a
			critical section; if it is the kernel will fault. Also, the kernel
			must be locked exactly once (iKernCSLocked = 1).

	@post	Kernel is locked, if not acting on calling thread.
	@post	Does not return if it acts on the calling thread.
 */
EXPORT_C void NThreadBase::Kill()
	{
	// Kill a thread
	// Enter with kernel locked
	// Exit with kernel locked if not current thread, otherwise does not return
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::Kill");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nKill",this));
	OnKill(); // platform-specific hook
	NThreadBase* pC = NCurrentThreadL();
	if (this==pC)
		{
		if (iCsFunction==ECSExitInProgress)
			FAULT();
		Exit();				// this will not return
		}
	SuspendOrKill(-1);
	}


/** Changes the CPU affinity of a thread.

	@pre	Kernel must be locked.
	@pre	Call in a thread context.

	@param	aAffinity The number of the CPU to which this thread should be
			locked, or KCpuAny if it should be able to run on any CPU.
	@return The previous affinity mask.
*/
TUint32 NThreadBase::SetCpuAffinity(TUint32 aAffinity)
	{
	// check aAffinity is valid
	AcqSLock();
	TUint32 old_aff = iParent->iCpuAffinity;
	TBool migrate = FALSE;
	TBool make_ready = FALSE;
	TSubScheduler* ss0 = &SubScheduler();
	TSubScheduler* ss = 0;
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetCpu %08x->%08x, F:%d R:%02x PR:%02x",this,iParent->iCpuAffinity,aAffinity,iParent->iFreezeCpu,iReady,iParent->iReady));
	if (i_NThread_Initial)
		goto done;	// can't change affinity of initial thread
	iParent->iCpuAffinity = aAffinity;		// set new affinity, might not take effect yet
	if (!iParent->iReady)
		goto done;	// thread/group not currently on a ready list so can just change affinity
	migrate = !CheckCpuAgainstAffinity(iParent->iReady & EReadyCpuMask, aAffinity);	// TRUE if thread's current CPU is incompatible with the new affinity
	if (!migrate)
		goto done;	// don't need to move thread, so just change affinity
	ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
	ss->iReadyListLock.LockOnly();
	if (iParent->iCurrent)
		{
		iParent->iCpuChange = TRUE;			// mark CPU change pending
		if (ss == ss0)
			RescheduleNeeded();
		else
			// kick other CPU now so migration happens before acquisition of fast mutex
			send_resched_ipi_and_wait(iParent->iReady & EReadyCpuMask);
		}
	else
		{
		// Note: Need to know here if any thread in group would return TRUE from CheckFastMutexDefer()
		// This is handled by the scheduler - when a thread belonging to a group is context switched
		// out while holding a fast mutex its iFastMutexDefer is set to 1 and the group's iFreezeCpu
		// is incremented.
		if (iParent->iFreezeCpu || (iParent==this && CheckFastMutexDefer()))
			iParent->iCpuChange = TRUE;	// CPU frozen or fast mutex held so just mark deferred CPU migration
		else
			{
			ss->Remove(iParent);
			iParent->iReady = 0;
			make_ready = TRUE;
			}
		}
	ss->iReadyListLock.UnlockOnly();
	if (make_ready)
		iParent->ReadyT(0);
done:
	RelSLock();
	return old_aff;
	}
       
  1337 
       
  1338 
       
  1339 /******************************************************************************
       
  1340  * Thread wait state
       
  1341  ******************************************************************************/
       
  1342 #ifndef __NTHREAD_WAITSTATE_MACHINE_CODED__
       
  1343 void NThreadWaitState::SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj)
       
  1344 	{
       
  1345 	SetUpWait(aType, aFlags, aWaitObj, 0);
       
  1346 	}
       
  1347 
       
  1348 void NThreadWaitState::SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj, TUint32 aTimeout)
       
  1349 	{
       
  1350 	aFlags &= EWtStObstructed;
       
  1351 	aFlags |= EWtStWaitPending;
       
  1352 	aType &= 0xff;
       
  1353 	TUint64 ws64 = (TUint32)aWaitObj;
       
  1354 	ws64 <<= 32;
       
  1355 	ws64 |= ((aType<<8)|aFlags);
       
  1356 	TUint64 oldws64 = __e32_atomic_swp_rlx64(&iWtSt64, ws64);
       
  1357 	if (I64LOW(oldws64)!=0)
       
  1358 		__crash();	// ??we were already waiting for something else??
       
  1359 	iTimer.iTriggerTime = aTimeout;
       
  1360 	}
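
/* Illustrative layout of the 64-bit wait-state word assembled above:

	iWtSt64 bits 63..32 : wait object pointer (aWaitObj)
	iWtSt64 bits 15..8  : wait object type (aType & 0xff)
	iWtSt64 bits  7..0  : flags (EWtStWaitPending, optionally EWtStObstructed)

   A single atomic 64-bit swap publishes object, type and flags together,
   which lets DoWait() and UnBlockT() below validate a whole wait with one
   64-bit comparison.
*/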
       
  1361 
       
  1362 void NThreadWaitState::CancelWait()
       
  1363 	{
       
  1364 	TUint64 oldws64 = __e32_atomic_swp_rlx64(&iWtSt64, 0);
       
  1365 	if (oldws64 & (EWtStDead|EWtStWaitActive))
       
  1366 		__crash();
       
  1367 	}
       
  1368 
       
  1369 TInt NThreadWaitState::DoWait()
       
  1370 	{
       
  1371 	TUint64 oldws64 = iWtSt64;
       
  1372 	TUint64 ws64;
       
  1373 	TUint32 timeout = iTimer.iTriggerTime;
       
  1374 	TUint32 set = timeout ? (EWtStWaitActive|EWtStTimeout) : EWtStWaitActive;
       
  1375 	do	{
       
  1376 		TUint32 ws32 = I64LOW(oldws64);
       
  1377 		if (ws32 & EWtStDead)
       
  1378 			return KErrDied;
       
  1379 		if (!(ws32 & EWtStWaitPending))
       
  1380 			return KErrGeneral;
       
  1381 		ws64 = oldws64;
       
  1382 		ws64 &= ~TUint64(EWtStWaitPending);
       
  1383 		ws64 |= TUint64(set);
       
  1384 		} while(!__e32_atomic_cas_rlx64(&iWtSt64, &oldws64, ws64));
       
  1385 	if (timeout)
       
  1386 		{
       
  1387 		if (iTimer.OneShot(timeout, TRUE)!=KErrNone)
       
  1388 			__crash();
       
  1389 		++iTimer.iNTimerSpare1;
       
  1390 		}
       
  1391 	return TUint32(oldws64)>>8;
       
  1392 	}
       
  1393 
       
  1394 TInt NThreadWaitState::UnBlockT(TUint aType, TAny* aWaitObj, TInt aReturnValue)
       
  1395 	{
       
  1396 	TUint64 exp = TUint32(aWaitObj);
       
  1397 	exp <<= 32;
       
  1398 	exp |= (aType<<8);
       
  1399 	TUint64 oldws64 = iWtSt64;
       
  1400 	TUint64 ws64;
       
  1401 	do	{
       
   1402 		if ((oldws64 ^ exp) < TUint64(EWtStDead))	// wait object and type match, and thread not dead?
       
  1403 			ws64 = TUint64(TUint32(aReturnValue))<<32;
       
  1404 		else
       
  1405 			ws64 = oldws64;
       
  1406 		} while(!__e32_atomic_cas_rel64(&iWtSt64, &oldws64, ws64));
       
  1407 	if ((oldws64 ^ exp) >= TUint64(EWtStDead))
       
  1408 		return KErrGeneral;	// not unblocked - no matching wait
       
  1409 	if (oldws64 & EWtStTimeout)
       
  1410 		CancelTimerT();
       
  1411 	if (oldws64 & EWtStWaitActive)
       
  1412 		{
       
  1413 		NThreadBase* t = Thread();
       
  1414 		if (!t->iPauseCount && !t->iSuspended)
       
  1415 			t->ReadyT(0);
       
  1416 		}
       
  1417 	return KErrNone;
       
  1418 	}
       
  1419 
       
  1420 TUint32 NThreadWaitState::ReleaseT(TAny*& aWaitObj, TInt aReturnValue)
       
  1421 	{
       
  1422 	TUint64 leave = EWtStDead;
       
  1423 	TUint64 set = TUint64(TUint32(aReturnValue))<<32;
       
  1424 	TUint64 ws64 = __e32_atomic_axo_ord64(&iWtSt64, leave, set);
       
  1425 	aWaitObj = (TAny*)I64HIGH(ws64);
       
  1426 	TUint32 ws32 = I64LOW(ws64);
       
  1427 	if (ws32 & EWtStTimeout)
       
  1428 		CancelTimerT();
       
  1429 	return ws32;
       
  1430 	}
       
  1431 #endif
       
  1432 
       
  1433 void NThreadWaitState::SetDead(TDfc* aKillDfc)
       
  1434 	{
       
  1435 	TDfc syncDfc(&exit_sync_fn, aKillDfc, TheTimerQ.iDfc.iDfcQ, 0);
       
  1436 	NThreadBase* t = Thread();
       
  1437 	t->AcqSLock();
       
  1438 	iWtC.iWtStFlags = NThreadWaitState::EWtStDead;
       
  1439 	iWtC.iWtObjType = NThreadBase::EWaitNone;
       
  1440 	CancelTimerT();
       
  1441 	if (aKillDfc && iTimer.iNTimerSpare1)
       
  1442 		{
       
  1443 		// There is an outstanding timer expiry handler still running
       
  1444 		// so we must synchronise with DfcThread1.
       
  1445 		// Add a priority 0 DFC to DfcThread1 so this thread's exit DFC can
       
  1446 		// only run after the timer expiry handler has completed.
       
  1447 		aKillDfc = &syncDfc;
       
  1448 		}
       
  1449 	iWtC.iKillDfc = aKillDfc;
       
  1450 	RescheduleNeeded();
       
  1451 	t->RelSLock();
       
  1452 	NKern::Unlock();	// this won't return
       
  1453 	}
       
  1454 
       
  1455 void NThreadWaitState::CancelTimerT()
       
  1456 	{
       
  1457 	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nCancelTimerT ",Thread()));
       
  1458 	if (iTimer.Cancel())
       
  1459 		--iTimer.iNTimerSpare1;
       
  1460 	else
       
  1461 		{
       
  1462 		// Potential race condition - timer must have completed but expiry
       
  1463 		// handler has not yet run. Signal to the handler that it should do
       
  1464 		// nothing by flipping the bottom bit of iTimer.iPtr
       
  1465 		// This condition cannot possibly recur until the expiry handler has
       
  1466 		// run since all expiry handlers run in DfcThread1.
       
  1467 		volatile TLinAddr& x = *(volatile TLinAddr*)&iTimer.iPtr;
       
  1468 		x ^= 1;
       
  1469 		}
       
  1470 	}
       
  1471 
       
  1472 // Timeout handler, called in DfcThread1
       
  1473 // NOTE: aPtr is sampled with the timer queue locked, so if Cancel() on the timer fails
       
  1474 // and iTimer.iPtr is then changed, aPtr here will differ from iTimer.iPtr.
       
  1475 // This fact is used here to detect expiry of cancelled timers.
       
  1476 void NThreadWaitState::TimerExpired(TAny* aPtr)
       
  1477 	{
       
  1478 	TLinAddr cookie = (TLinAddr)aPtr;
       
  1479 	NThreadWaitState* pW = (NThreadWaitState*)(cookie &~ 3);
       
  1480 	NThread* pT = (NThread*)pW->Thread();
       
  1481 	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nTmExp",pT));
       
  1482 	NThreadTimeoutHandler th = pT->iHandlers->iTimeoutHandler;
       
  1483 	pT->LAcqSLock();
       
  1484 	TUint flags = pW->iWtSt32[0];
       
  1485 	if (!(flags & EWtStWaitActive) || ((flags>>8)&0xff)!=NThreadBase::EWaitBlocked)
       
  1486 		th = 0;
       
  1487 	if (th)
       
  1488 		{
       
  1489 		// Use higher level timeout handler
       
  1490 		pT->RelSLockU();
       
  1491 		(*th)(pT, NThreadBase::ETimeoutPreamble);
       
  1492 		TInt param = NThreadBase::ETimeoutPostamble;
       
  1493 		pT->LAcqSLock();
       
  1494 		TLinAddr current_cookie = *(volatile TLinAddr*)&pW->iTimer.iPtr;
       
  1495 		if ((cookie ^ current_cookie) & 1)
       
  1496 			{
       
  1497 			// The timer was cancelled just after expiring but before this function
       
  1498 			// managed to acquire the thread spin lock, so it's spurious
       
  1499 			param = NThreadBase::ETimeoutSpurious;
       
  1500 			}
       
  1501 		pT->RelSLockU();
       
  1502 		(*th)(pT, param);
       
  1503 		pT->LAcqSLock();
       
  1504 		--pW->iTimer.iNTimerSpare1;	// note timer has expired
       
  1505 		pT->RelSLockU();
       
  1506 		return;
       
  1507 		}
       
  1508 	TLinAddr current_cookie = *(volatile TLinAddr*)&pW->iTimer.iPtr;
       
  1509 	if ((cookie ^ current_cookie) & 1)
       
  1510 		// The timer was cancelled just after expiring but before this function
       
  1511 		// managed to acquire the thread spin lock, so just return without doing anything.
       
  1512 		goto done;
       
  1513 	pT->DoReleaseT(KErrTimedOut,0);
       
  1514 done:
       
  1515 	pT->RelSLockU();
       
  1516 	}
       
  1517 
       
  1518 
       
  1519 
       
  1520 /******************************************************************************
       
  1521  * NKern:: static functions
       
  1522  ******************************************************************************/
       
  1523 
       
  1524 /** Suspends the execution of a thread.
       
  1525 
       
  1526 	This function is intended to be used by the EPOC layer and personality layers.
       
  1527 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadSuspend().
       
  1528 
       
  1529     If the thread is in a critical section or holds a fast mutex, the suspension will
       
  1530     be deferred until the thread leaves the critical section or signals the fast mutex.
       
   1531     Otherwise the thread will be suspended with immediate effect. If the thread is
       
   1532     currently running, its execution will be suspended and a reschedule will occur.
       
  1533 
       
  1534     @param aThread Thread to be suspended.
       
  1535     @param aCount  Number of times to suspend this thread.
       
  1536     
       
   1537     @return TRUE if the thread changed state from non-suspended to suspended;
       
   1538 	        FALSE otherwise.
       
  1539 	     
       
  1540 	@see Kern::ThreadSuspend()
       
  1541 */
       
  1542 EXPORT_C TBool NKern::ThreadSuspend(NThread* aThread, TInt aCount)
       
  1543 	{	
       
  1544 	NKern::Lock();
       
  1545 	TBool r=aThread->Suspend(aCount);
       
  1546 	NKern::Unlock();
       
  1547 	return r;
       
  1548 	}
       
  1549 
       
  1550 
       
  1551 /** Resumes the execution of a thread.
       
  1552 
       
  1553 	This function is intended to be used by the EPOC layer and personality layers.
       
  1554 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
       
  1555 
       
  1556     This function resumes the thread once. If the thread was suspended more than once
       
  1557     the thread will remain suspended.
       
  1558     If the thread is in a critical section, this function will decrease the number of
       
  1559     deferred suspensions.
       
  1560 
       
  1561     @param aThread Thread to be resumed.
       
  1562     
       
   1563     @return TRUE if the thread changed state from suspended to non-suspended;
       
   1564             FALSE otherwise.
       
  1565             
       
  1566 	@see Kern::ThreadResume()
       
  1567 */
       
  1568 EXPORT_C TBool NKern::ThreadResume(NThread* aThread)
       
  1569 	{	
       
  1570 	NKern::Lock();
       
  1571 	TBool r=aThread->Resume();
       
  1572 	NKern::Unlock();
       
  1573 	return r;
       
  1574 	}
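
/* Example (illustrative): suspensions are counted, so each suspension must be
   balanced by a resume (or cancelled in one go by ThreadForceResume()).

	NKern::ThreadSuspend(t, 2);	// suspend count becomes 2, returns TRUE
	NKern::ThreadResume(t);		// count 1 - still suspended, returns FALSE
	NKern::ThreadResume(t);		// count 0 - runnable again, returns TRUE
*/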
       
  1575 
       
  1576 
       
  1577 /** Resumes the execution of a thread and signals a mutex.
       
  1578 
       
  1579 	This function is intended to be used by the EPOC layer and personality layers.
       
  1580 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
       
  1581 
       
  1582     This function resumes the thread once. If the thread was suspended more than once
       
  1583     the thread will remain suspended.
       
  1584     If the thread is in a critical section, this function will decrease the number of
       
  1585     deferred suspensions.
       
  1586 
       
  1587     @param aThread Thread to be resumed.
       
  1588     @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
       
  1589 
       
   1590     @return TRUE if the thread changed state from suspended to non-suspended;
       
   1591             FALSE otherwise.
       
  1592            
       
  1593 	@see Kern::ThreadResume()
       
  1594 */
       
  1595 EXPORT_C TBool NKern::ThreadResume(NThread* aThread, NFastMutex* aMutex)
       
  1596 	{
       
  1597 	if (!aMutex)
       
  1598 		aMutex=&TheScheduler.iLock;
       
  1599 	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T NRsm + FM %M",aThread,aMutex));
       
  1600 	NKern::Lock();
       
  1601 	TBool r=aThread->Resume();
       
  1602 	aMutex->Signal();
       
  1603 	NKern::Unlock();
       
  1604 	return r;
       
  1605 	}
       
  1606 
       
  1607 
       
  1608 /** Forces the execution of a thread to be resumed.
       
  1609 
       
  1610 	This function is intended to be used by the EPOC layer and personality layers.
       
  1611 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
       
  1612 
       
  1613     This function cancels all suspensions on a thread.
       
  1614 
       
  1615     @param aThread Thread to be resumed.
       
  1616     
       
   1617     @return TRUE if the thread changed state from suspended to non-suspended;
       
   1618             FALSE otherwise.
       
  1619             
       
  1620 	@see Kern::ThreadResume()
       
  1621 */
       
  1622 EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread)
       
  1623 	{	
       
  1624 	NKern::Lock();
       
  1625 	TBool r=aThread->ForceResume();
       
  1626 	NKern::Unlock();
       
  1627 	return r;
       
  1628 	}
       
  1629 
       
  1630 
       
  1631 /** Forces the execution of a thread to be resumed and signals a mutex.
       
  1632 
       
  1633 	This function is intended to be used by the EPOC layer and personality layers.
       
  1634 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
       
  1635 
       
  1636     This function cancels all suspensions on a thread.
       
  1637 
       
  1638     @param aThread Thread to be resumed.
       
  1639     @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
       
  1640     
       
   1641     @return TRUE if the thread changed state from suspended to non-suspended;
       
   1642             FALSE otherwise.
       
  1643             
       
  1644     @see Kern::ThreadResume()
       
  1645 */
       
  1646 EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread, NFastMutex* aMutex)
       
  1647 	{
       
  1648 	if (!aMutex)
       
  1649 		aMutex=&TheScheduler.iLock;
       
  1650 	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T NFRsm + FM %M",aThread,aMutex));
       
  1651 	NKern::Lock();
       
  1652 	TBool r=aThread->ForceResume();
       
  1653 	aMutex->Signal();
       
  1654 	NKern::Unlock();
       
  1655 	return r;
       
  1656 	}
       
  1657 
       
  1658 
       
  1659 /** Awakens a nanothread.
       
  1660 
       
  1661 	This function is used to implement synchronisation primitives in the EPOC
       
  1662 	kernel (e.g. DMutex and DSemaphore) and in personality layers.  It is not
       
  1663 	intended to be used directly by device drivers.
       
  1664 
       
  1665 	If the nanothread is waiting on a fast semaphore, waiting for a DFC, or is
       
  1666 	blocked in a call to NKern::Block, it is awakened and put back on the ready
       
  1667 	list.  Otherwise, the thread state is unchanged.  In particular, nothing
       
  1668 	happens if the nanothread has been explicitly suspended.
       
  1669 
       
  1670 	@param aThread Thread to release.
       
  1671 	@param aReturnValue Value returned by NKern::Block if the thread was blocked.
       
  1672 
       
  1673 	@see NKern::Block()
       
  1674 
       
  1675 	@pre Interrupts must be enabled.
       
  1676 	@pre Do not call from an ISR
       
  1677  */
       
  1678 EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue)
       
  1679 	{
       
  1680 	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::ThreadRelease(NThread*, TInt)");
       
  1681 	NKern::Lock();
       
  1682 	aThread->Release(aReturnValue,0);
       
  1683 	NKern::Unlock();
       
  1684 	}
       
  1685 
       
  1686 
       
  1687 /** Atomically awakens a nanothread and signals a fast mutex.
       
  1688 
       
  1689 	This function is used to implement synchronisation primitives in the EPOC
       
  1690 	kernel (e.g. DMutex and DSemaphore) and in personality layers.  It is not
       
  1691 	intended to be used directly by device drivers.
       
  1692 
       
  1693 	@param aThread Thread to release.
       
  1694 	@param aReturnValue Value returned by NKern::Block if the thread was blocked.
       
  1695 	@param aMutex Fast mutex to signal. If NULL, the system lock is signalled.
       
  1696 
       
  1697 	@see NKern::ThreadRelease(NThread*, TInt)
       
  1698 	@see NKern::Block()
       
  1699 
       
  1700 	@pre	Call in a thread context.
       
  1701 	@pre	Interrupts must be enabled.
       
  1702 	@pre	Kernel must be unlocked.
       
  1703 	@pre	Specified mutex must be held
       
  1704  */
       
  1705 EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue, NFastMutex* aMutex)
       
  1706 	{
       
  1707 	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRelease(NThread*,TInt,NFastMutex*)");
       
  1708 	if (!aMutex)
       
  1709 		aMutex=&TheScheduler.iLock;
       
  1710 	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T NRel ret %d + FM %M",aThread,aReturnValue,aMutex));
       
  1711 	NKern::Lock();
       
  1712 	aThread->Release(aReturnValue,0);
       
  1713 	aMutex->Signal();
       
  1714 	NKern::Unlock();
       
  1715 	}
       
  1716 
       
  1717 
       
  1718 /** Changes the priority of a thread.
       
  1719 
       
  1720 	This function is intended to be used by the EPOC layer and personality layers.
       
  1721 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().
       
  1722 
       
  1723     @param aThread Thread to receive the new priority.
       
  1724     @param aPriority New priority for aThread.
       
  1725     
       
  1726 	@see Kern::SetThreadPriority()
       
  1727 */
       
  1728 EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority)
       
  1729 	{
       
  1730 	NKern::Lock();
       
  1731 	aThread->SetPriority(aPriority);
       
  1732 	NKern::Unlock();
       
  1733 	}
       
  1734 
       
  1735 
       
  1736 /** Changes the priority of a thread and signals a mutex.
       
  1737 
       
  1738 	This function is intended to be used by the EPOC layer and personality layers.
       
  1739 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().
       
  1740 
       
  1741     @param aThread Thread to receive the new priority.
       
  1742     @param aPriority New priority for aThread.
       
  1743     @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
       
  1744         
       
  1745 	@see Kern::SetThreadPriority()
       
  1746 */
       
  1747 EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority, NFastMutex* aMutex)
       
  1748 	{	
       
  1749 	if (!aMutex)
       
  1750 		aMutex=&TheScheduler.iLock;
       
  1751 	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T NSPri->%d + FM %M",aThread,aPriority,aMutex));
       
  1752 	NKern::Lock();
       
  1753 	aThread->SetPriority(aPriority);
       
  1754 	aMutex->Signal();
       
  1755 	NKern::Unlock();
       
  1756 	}
       
  1757 
       
  1758 
       
  1759 /** Atomically signals the request semaphore of a nanothread and a fast mutex.
       
  1760 
       
  1761 	This function is intended to be used by the EPOC layer and personality
       
  1762 	layers.  Device drivers should use Kern::RequestComplete instead.
       
  1763 
       
  1764 	@param aThread Nanothread to signal.  Must be non NULL.
       
   1765 	@param aMutex Fast mutex to signal.  If NULL, the system lock is signalled.
       
  1766 
       
  1767 	@see Kern::RequestComplete()
       
  1768 
       
  1769 	@pre	Call in a thread context.
       
  1770 	@pre	Interrupts must be enabled.
       
  1771 	@pre	Kernel must be unlocked.
       
  1772 	@pre	Specified mutex must be held
       
  1773  */
       
  1774 EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, NFastMutex* aMutex)
       
  1775 	{
       
  1776 	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRequestSignal(NThread*,NFastMutex*)");
       
  1777 	if (!aMutex)
       
  1778 		aMutex = &TheScheduler.iLock;
       
  1779 	NKern::Lock();
       
  1780 	aThread->iRequestSemaphore.Signal();
       
  1781 	aMutex->Signal();
       
  1782 	NKern::Unlock();
       
  1783 	}
       
  1784 
       
  1785 
       
  1786 /**	Kills a nanothread.
       
  1787 
       
  1788 	This function is intended to be used by the EPOC layer and personality layers.
       
  1789 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill().
       
  1790 
       
  1791 	This function does not return if the current thread is killed.  
       
  1792 	This function is asynchronous (i.e. the thread to kill may still be alive when the call returns).
       
  1793 
       
  1794 	@param aThread Thread to kill.  Must be non NULL.
       
  1795 
       
  1796 	@pre If acting on calling thread, calling thread must not be in a
       
  1797 			critical section
       
  1798 	@pre Thread must not already be exiting.
       
  1799 
       
  1800 	@see Kern::ThreadKill()
       
  1801  */
       
  1802 EXPORT_C void NKern::ThreadKill(NThread* aThread)
       
  1803 	{
       
  1804 	NKern::Lock();
       
  1805 	aThread->Kill();
       
  1806 	NKern::Unlock();
       
  1807 	}
       
  1808 
       
  1809 
       
  1810 /**	Atomically kills a nanothread and signals a fast mutex.
       
  1811 
       
  1812 	This function is intended to be used by the EPOC layer and personality layers.
       
  1813 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill().
       
  1814 
       
  1815 	@param aThread Thread to kill.  Must be non NULL.
       
  1816 	@param aMutex Fast mutex to signal.  If NULL, the system lock is signalled.
       
  1817 
       
  1818 	@pre	If acting on calling thread, calling thread must not be in a
       
  1819 			critical section
       
  1820 	@pre Thread must not already be exiting.
       
  1821 
       
  1822 	@see NKern::ThreadKill(NThread*)
       
  1823  */
       
  1824 EXPORT_C void NKern::ThreadKill(NThread* aThread, NFastMutex* aMutex)
       
  1825 	{
       
  1826 	if (!aMutex)
       
  1827 		aMutex = &TheScheduler.iLock;
       
  1828 	NThreadBase* pC = NKern::LockC();
       
  1829 	if (aThread==pC)
       
  1830 		{
       
  1831 		__NK_ASSERT_DEBUG(pC->iCsCount==0);	// Make sure thread isn't in critical section
       
  1832 		__NK_ASSERT_ALWAYS(aMutex->HeldByCurrentThread());
       
  1833 		pC->AcqSLock();
       
  1834 		aThread->iCsFunction = NThreadBase::ECSExitPending;
       
  1835 		pC->RelSLock();
       
  1836 		aMutex->iHoldingThread = (NThreadBase*)(TLinAddr(aThread) | 1);
       
  1837 		aMutex->Signal();	// this will make us exit
       
  1838 		FAULT();			// should never get here
       
  1839 		}
       
  1840 	else
       
  1841 		{
       
  1842 		aThread->Kill();
       
  1843 		aMutex->Signal();
       
  1844 		}
       
  1845 	NKern::Unlock();
       
  1846 	}
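
/* Note (illustrative): setting the low bit of iHoldingThread above marks the
   fast mutex as contended, so the following Signal() takes the slow path and
   carries out the ECSExitPending function set for the current thread - hence
   "this will make us exit".
*/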
       
  1847 
       
  1848 
       
  1849 /** Enters thread critical section.
       
  1850 
       
  1851 	This function can safely be used in device drivers.
       
  1852 
       
  1853     The current thread will enter its critical section. While in critical section
       
  1854     the thread cannot be suspended or killed. Any suspension or kill will be deferred
       
  1855     until the thread leaves the critical section.
       
   1856     Some APIs explicitly require threads to be in a critical section before they
       
   1857     are called.
       
  1858     Only User threads need to call this function as the concept of thread critical
       
  1859     section applies to User threads only.
       
  1860 
       
  1861 	@pre	Call in a thread context.
       
  1862 	@pre	Kernel must be unlocked.
       
  1863 */
       
  1864 EXPORT_C void NKern::ThreadEnterCS()
       
  1865 	{
       
  1866 	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadEnterCS");
       
  1867 	NThreadBase* pC = NKern::CurrentThread();
       
  1868 	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T NEntCS",pC));
       
  1869 	__NK_ASSERT_DEBUG(pC->iCsCount>=0);
       
  1870 	++pC->iCsCount;
       
  1871 	}
       
  1872 
       
  1873 NThread* NKern::_ThreadEnterCS()
       
  1874 	{
       
  1875 	NThreadBase* pC = NKern::CurrentThread();
       
  1876 	__NK_ASSERT_DEBUG(pC->iCsCount>=0);
       
  1877 	++pC->iCsCount;
       
  1878 	return (NThread*)pC;
       
  1879 	}
       
  1880 
       
  1881 
       
  1882 /** Leaves thread critical section.
       
  1883 
       
  1884 	This function can safely be used in device drivers.
       
  1885 
       
  1886     The current thread will leave its critical section. If the thread was suspended/killed
       
  1887     while in critical section, the thread will be suspended/killed after leaving the
       
  1888     critical section by calling this function.
       
  1889     Only User threads need to call this function as the concept of thread critical
       
  1890     section applies to User threads only.
       
  1891 
       
  1892 	@pre	Call in a thread context.
       
  1893 	@pre	Kernel must be unlocked.
       
  1894 */
       
  1895 EXPORT_C void NKern::ThreadLeaveCS()
       
  1896 	{
       
  1897 	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadLeaveCS");
       
  1898 	NThreadBase* pC = NKern::LockC();
       
  1899 	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T NLvCS",pC));
       
  1900 	pC->AcqSLock();
       
  1901 	__NK_ASSERT_DEBUG(pC->iCsCount>0);
       
  1902 	if (--pC->iCsCount==0 && pC->iCsFunction!=0)
       
  1903 		{
       
  1904 		NFastMutex* m = HeldFastMutex();
       
  1905 		if (m)
       
  1906 			m->iHoldingThread = (NThreadBase*)(TLinAddr(pC) | 1);
       
  1907 		else
       
  1908 			pC->DoCsFunctionT();
       
  1909 		}
       
  1910 	pC->RelSLock();
       
  1911 	NKern::Unlock();
       
  1912 	}
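
/* Example (illustrative): bracketing work that must not be interrupted by a
   suspend or kill. Any suspension/kill requested in between is deferred until
   the ThreadLeaveCS() call. DoNonAbortableWork() is hypothetical.

	NKern::ThreadEnterCS();
	DoNonAbortableWork();
	NKern::ThreadLeaveCS();		// deferred suspend/kill takes effect here
*/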
       
  1913 
       
  1914 void NKern::_ThreadLeaveCS()
       
  1915 	{
       
  1916 	NThreadBase* pC = NKern::LockC();
       
  1917 	pC->AcqSLock();
       
  1918 	__NK_ASSERT_DEBUG(pC->iCsCount>0);
       
  1919 	if (--pC->iCsCount==0 && pC->iCsFunction!=0)
       
  1920 		{
       
  1921 		NFastMutex* m = HeldFastMutex();
       
  1922 		if (m)
       
  1923 			m->iHoldingThread = (NThreadBase*)(TLinAddr(pC) | 1);
       
  1924 		else
       
  1925 			pC->DoCsFunctionT();
       
  1926 		}
       
  1927 	pC->RelSLock();
       
  1928 	NKern::Unlock();
       
  1929 	}
       
  1930 
       
  1931 /** Freeze the CPU of the current thread
       
  1932 
       
  1933 	After this the current thread will not migrate to another processor
       
  1934 
       
  1935 	@return	A cookie to be passed to NKern::EndFreezeCpu() to allow nesting
       
  1936 */
       
  1937 EXPORT_C TInt NKern::FreezeCpu()
       
  1938 	{
       
  1939 	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::FreezeCpu");
       
  1940 	NKern::Lock();
       
  1941 	TSubScheduler& ss = SubScheduler();
       
  1942 	NThreadBase* pC = ss.iCurrentThread;
       
  1943 	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T NFrzCpu",pC));
       
  1944 	if (pC->iFreezeCpu)
       
  1945 		{
       
  1946 		NKern::Unlock();
       
  1947 		return 1;
       
  1948 		}
       
  1949 	pC->iFreezeCpu = 1;
       
  1950 	if (pC->iParent != pC)
       
  1951 		{
       
  1952 		pC->AcqSLock();
       
  1953 		++pC->iParent->iFreezeCpu;
       
  1954 		pC->RelSLock();
       
  1955 		}
       
  1956 	NKern::Unlock();
       
  1957 	return 0;
       
  1958 	}
       
  1959 
       
  1960 
       
  1961 /** Unfreeze the current thread's CPU
       
  1962 
       
  1963 	After this the current thread will again be eligible to migrate to another processor
       
  1964 
       
  1965 	@param	aCookie the value returned by NKern::FreezeCpu()
       
  1966 */
       
  1967 EXPORT_C void NKern::EndFreezeCpu(TInt aCookie)
       
  1968 	{
       
  1969 	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::EndFreezeCpu");
       
  1970 	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T NEndFrz %d",NKern::CurrentThread(),aCookie));
       
  1971 	if (aCookie)
       
  1972 		return;
       
  1973 	NKern::Lock();
       
  1974 	TSubScheduler& ss = SubScheduler();
       
  1975 	NThreadBase* pC = ss.iCurrentThread;
       
  1976 	if (pC->iFreezeCpu)
       
  1977 		{
       
  1978 		pC->iFreezeCpu = 0;
       
  1979 		mb();
       
  1980 		if (pC->iParent != pC)
       
  1981 			{
       
  1982 			pC->AcqSLock();
       
  1983 			if (!--pC->iParent->iFreezeCpu && pC->iParent->iCpuChange)
       
  1984 				RescheduleNeeded();
       
  1985 			pC->RelSLock();
       
  1986 			}
       
  1987 		else if (pC->iCpuChange)		// deferred CPU change?
       
  1988 			RescheduleNeeded();
       
  1989 		}
       
  1990 	NKern::Unlock();
       
  1991 	}
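
/* Example (illustrative): FreezeCpu()/EndFreezeCpu() nest via the cookie;
   only the outermost EndFreezeCpu() (cookie 0) re-enables migration.

	TInt c1 = NKern::FreezeCpu();	// returns 0 - freeze taken here
	TInt c2 = NKern::FreezeCpu();	// returns 1 - already frozen
	NKern::EndFreezeCpu(c2);		// no-op since cookie is nonzero
	NKern::EndFreezeCpu(c1);		// thread may migrate again
*/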
       
  1992 
       
  1993 
       
  1994 /** Change the CPU affinity of a thread
       
  1995 
       
  1996 	@pre	Call in a thread context.
       
  1997 
       
   1998 	@param	aAffinity The new CPU affinity mask
       
  1999 	@return The old affinity mask
       
  2000  */
       
  2001 EXPORT_C TUint32 NKern::ThreadSetCpuAffinity(NThread* aThread, TUint32 aAffinity)
       
  2002 	{
       
  2003 	NKern::Lock();
       
  2004 	TUint32 r = aThread->SetCpuAffinity(aAffinity);
       
  2005 	NKern::Unlock();
       
  2006 	return r;
       
  2007 	}
       
  2008 
       
  2009 
       
  2010 /** Modify a thread's timeslice
       
  2011 
       
  2012 	@pre	Call in a thread context.
       
  2013 
       
  2014 	@param	aTimeslice	The new timeslice value
       
  2015  */
       
  2016 EXPORT_C void NKern::ThreadSetTimeslice(NThread* aThread, TInt aTimeslice)
       
  2017 	{
       
  2018 	NKern::Lock();
       
  2019 	aThread->AcqSLock();
       
   2020 	if (aThread->iTimeslice == aThread->iTime || aTimeslice<0)	// full timeslice remaining, or timeslicing being disabled
       
   2021 		aThread->iTime = aTimeslice;	// ... so reset the remaining time as well
       
  2022 	aThread->iTimeslice = aTimeslice;
       
  2023 	aThread->RelSLock();
       
  2024 	NKern::Unlock();
       
  2025 	}
       
  2026 
       
  2027 
       
  2028 /** Blocks current nanothread.
       
  2029 
       
  2030 	This function is used to implement synchronisation primitives in the EPOC
       
  2031 	layer and in personality layers.  It is not intended to be used directly by
       
  2032 	device drivers.  
       
  2033 
       
  2034 	@param aTimeout If greater than 0, the nanothread will be blocked for at most
       
  2035 					aTimeout microseconds.
       
  2036 	@param aMode	Bitmask whose possible values are documented in TBlockMode.  
       
  2037 	@param aMutex	Fast mutex to operate on.  If NULL, the system lock is used.
       
  2038 
       
  2039 	@see NKern::ThreadRelease()
       
  2040 	@see TBlockMode
       
  2041 
       
  2042 	@pre	Call in a thread context.
       
  2043 	@pre	Interrupts must be enabled.
       
  2044 	@pre	Kernel must be unlocked.
       
  2045 	@pre	Specified mutex must be held
       
  2046  */
       
  2047 EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode, NFastMutex* aMutex)
       
  2048 	{
       
  2049 	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Block(TUint32,TUint,NFastMutex*)");
       
  2050 	if (!aMutex)
       
  2051 		aMutex = &TheScheduler.iLock;
       
  2052 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d FM %M",aTimeout,aMode,aMutex));
       
  2053 	if (aMode & EEnterCS)
       
  2054 		NKern::_ThreadEnterCS();	// NOTE: MUST DO THIS BEFORE CALLING NKern::Lock()
       
  2055 	NThreadBase* pC = NKern::LockC();
       
  2056 	TUint flags = (aMode & NKern::EObstruct) ? NThreadWaitState::EWtStObstructed : 0;
       
  2057 	pC->iWaitState.SetUpWait(NThreadBase::EWaitBlocked, flags, 0, aTimeout);
       
  2058 	if (aMode & ERelease)
       
  2059 		aMutex->Signal();
       
  2060 	RescheduleNeeded();
       
  2061 	NKern::Unlock();	// thread blocks here
       
  2062 	TInt r = pC->iWaitState.iWtC.iRetVal;	// sample here since it will be overwritten if we block on the fast mutex
       
  2063 	if (aMode & EClaim)
       
  2064 		FMWait(aMutex);
       
  2065 	return r;
       
  2066 	}
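
/* Example (illustrative sketch): the waiter/waker pattern this function is
   designed for, here under the system lock. ExampleQ is a hypothetical queue
   protected by the system lock.

	// waiter - called with the system lock held:
	ExampleQ.Add(NKern::CurrentThread());
	TInt r = NKern::Block(0, NKern::ERelease|NKern::EClaim, NULL);
	// r is the aReturnValue supplied by the waker; system lock held again

	// waker - called with the system lock held:
	NThread* t = ExampleQ.RemoveFirst();
	if (t)
		NKern::ThreadRelease(t, KErrNone, (NFastMutex*)NULL);
*/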
       
  2067 
       
  2068 
       
  2069 /**
       
  2070 @pre	Call in a thread context.
       
  2071 @pre	Interrupts must be enabled.
       
  2072 @pre	Kernel must be unlocked.
       
  2073 @pre	No fast mutex can be held
       
  2074 */
       
  2075 /** @see NKern::Block(TUint32, TUint, NFastMutex*) */
       
  2076 EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode)
       
  2077 	{
       
  2078 	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Block(TUint32,TUint)");
       
  2079 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d",aTimeout,aMode));
       
  2080 	if (aMode & EEnterCS)
       
  2081 		NKern::_ThreadEnterCS();	// NOTE: MUST DO THIS BEFORE CALLING NKern::Lock()
       
  2082 	NThreadBase* pC = NKern::LockC();
       
  2083 	TUint flags = (aMode & NKern::EObstruct) ? NThreadWaitState::EWtStObstructed : 0;
       
  2084 	pC->iWaitState.SetUpWait(NThreadBase::EWaitBlocked, flags, 0, aTimeout);
       
  2085 	RescheduleNeeded();
       
  2086 	NKern::Unlock();	// thread blocks here
       
  2087 	return pC->iWaitState.iWtC.iRetVal;
       
  2088 	}
       
  2089 
       
  2090 
       
  2091 
       
  2092 
       
  2093 /**
       
  2094 Places the current nanothread into a wait state on an externally
       
  2095 defined wait object.
       
  2096 	
       
  2097 For use by RTOS personality layers.
       
  2098 Do not use this function directly on a Symbian OS thread.
       
  2099 
       
  2100 Since the kernel is locked on entry, any reschedule will be deferred until
       
  2101 it is unlocked. The thread should be added to any necessary wait queue after
       
  2102 a call to this function, since this function removes it from the ready list.
       
  2103 The thread's wait timer is started if aTimeout is nonzero.
       
  2104 The thread's NState and wait object are updated.
       
  2105 
       
  2106 Call NThreadBase::Release() when the wait condition is resolved.
       
  2107 
       
  2108 @param aTimeout The maximum time for which the thread should block, in nanokernel timer ticks.
       
  2109                 A zero value means wait forever.
       
  2110                 If the thread is still blocked when the timeout expires,
       
  2111                 then the timeout state handler will be called.
       
  2112 @param aState   The nanokernel thread state (N-State) value to be set.
       
  2113                 This state corresponds to the externally defined wait object.
       
  2114                 This value will be written into the member NThreadBase::iNState.
       
  2115 @param aWaitObj A pointer to an externally defined wait object.
       
  2116                 This value will be written into the member NThreadBase::iWaitObj.
       
  2117 
       
  2118 @pre	Kernel must be locked.
       
  2119 @pre	Call in a thread context.
       
  2120 
       
  2121 @post	Kernel is locked.
       
  2122 
       
  2123 @see	NThreadBase::Release()
       
  2124 */
       
  2125 EXPORT_C void NKern::NanoBlock(TUint32 aTimeout, TUint aState, TAny* aWaitObj)
       
  2126 	{
       
  2127 	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::NanoBlock");		
       
  2128 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NanoBlock time %d state %d obj %08x", aTimeout, aState, aWaitObj));
       
  2129 	NThreadBase* pC = NCurrentThreadL();
       
  2130 	pC->iWaitState.SetUpWait(aState, aState>>8, aWaitObj, aTimeout);
       
  2131 	RescheduleNeeded();
       
  2132 	}
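
/* Example (illustrative sketch): a personality-layer semaphore wait built on
   NanoBlock(). EMyWaitSem, MySem and its members are hypothetical; note the
   thread is queued after the NanoBlock() call, as described above.

	NKern::Lock();
	if (--MySem.iCount < 0)
		{
		NKern::NanoBlock(aTimeout, EMyWaitSem, &MySem);
		MySem.iQueue.Add(NKern::CurrentThread());
		}
	NKern::Unlock();	// deferred reschedule happens here
*/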
       
  2133 
       
  2134 
       
  2135 
       
  2136 
       
  2137 EXPORT_C void NKern::Sleep(TUint32 aTime)
       
  2138 /**
       
  2139 Puts the current nanothread to sleep for the specified duration.
       
  2140 
       
  2141 It can be called from Symbian OS threads.
       
  2142 
       
  2143 @param	aTime sleep time in nanokernel timer ticks.
       
  2144 
       
  2145 @pre    No fast mutex can be held.
       
  2146 @pre    Kernel must be unlocked.
       
  2147 @pre	Call in a thread context.
       
  2148 @pre	Interrupts must be enabled.
       
  2149 */
       
  2150 	{
       
  2151 	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Sleep");
       
  2152 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NSlp %d",aTime));
       
  2153 	NThreadBase* pC = NKern::LockC();
       
  2154 	pC->iWaitState.SetUpWait(NThreadBase::EWaitSleep, 0, 0, aTime);
       
  2155 	RescheduleNeeded();
       
  2156 	NKern::Unlock();
       
  2157 	}
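
/* Example (illustrative): sleep for roughly 10ms, assuming the caller can use
   NKern::TimerTicks() to convert milliseconds to nanokernel ticks.

	NKern::Sleep(NKern::TimerTicks(10));
*/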
       
  2158 
       
  2159 
       
  2160 /**	Terminates the current nanothread.
       
  2161 
       
  2162 	Calls to this function never return.
       
  2163 
       
  2164 	For use by RTOS personality layers.
       
  2165 	Do not use this function directly on a Symbian OS thread.
       
  2166 
       
  2167 	@pre	Call in a thread context.
       
  2168 	@pre	Interrupts must be enabled.
       
  2169 	@pre	Kernel must be unlocked.	
       
  2170  */
       
  2171 EXPORT_C void NKern::Exit()
       
  2172 	{
       
  2173 	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Exit");
       
  2174 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NExit"));
       
  2175 	NKern::LockC()->Exit();		// this won't return
       
  2176 	FAULT();
       
  2177 	}
       
  2178 
       
  2179 
       
  2180 /**	Terminates the current nanothread at the next possible point.
       
  2181 
       
  2182 	If the calling thread is not currently in a critical section and does not
       
  2183 	currently hold a fast mutex, it exits immediately and this function does
       
  2184 	not return. On the other hand if the thread is in a critical section or
       
  2185 	holds a fast mutex the thread continues executing but it will exit as soon
       
  2186 	as it leaves the critical section and/or releases the fast mutex.
       
  2187 
       
  2188 	@pre	Call in a thread context.
       
  2189 	@pre	Interrupts must be enabled.
       
  2190 	@pre	Kernel must be unlocked.	
       
  2191  */
       
  2192 EXPORT_C void NKern::DeferredExit()
       
  2193 	{
       
  2194 	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::DeferredExit");
       
  2195 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NDefExit"));
       
  2196 	NFastMutex* m = HeldFastMutex();
       
  2197 	NThreadBase* pC = NKern::LockC();
       
  2198 	if (!m && !pC->iCsCount)
       
  2199 		pC->Exit();			// this won't return
       
  2200 	pC->AcqSLock();
       
  2201 	if (pC->iCsFunction >= 0)	// don't touch it if we are already exiting
       
  2202 		pC->iCsFunction = NThreadBase::ECSExitPending;
       
  2203 	pC->RelSLock();
       
  2204 	if (m && !pC->iCsCount)
       
  2205 		m->iHoldingThread = (NThreadBase*)(TLinAddr(pC) | 1);
       
  2206 	NKern::Unlock();
       
  2207 	}
       
  2208 
       
  2209 
       
  2210 /** Prematurely terminates the current thread's timeslice
       
  2211 
       
  2212 	@pre	Kernel must be unlocked.
       
  2213 	@pre	Call in a thread context.
       
  2214 	
       
  2215 	@post	Kernel is unlocked.
       
  2216  */
       
  2217 EXPORT_C void NKern::YieldTimeslice()
       
  2218 	{
       
  2219 	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::YieldTimeslice");
       
  2220 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::YieldTimeslice"));
       
  2221 	NThreadBase* t = NKern::LockC();
       
  2222 	t->iTime = 0;
       
  2223 	mb();
       
  2224 	if (t->iNext!=t || t->iParent->iNext!=t->iParent)
       
  2225 		RescheduleNeeded();
       
  2226 	NKern::Unlock();
       
  2227 	}
       
  2228 
       
  2229 
       
  2230 /** Returns the number of CPUs available to Symbian OS
       
  2231 
       
  2232 	@return the number of CPUs
       
  2233 	
       
  2234 	@pre Call in any context.
       
  2235 */
       
  2236 EXPORT_C TInt NKern::NumberOfCpus()
       
  2237 	{
       
  2238 	return TheScheduler.iNumCpus;
       
  2239 	}
       
  2240 
       
  2241 
       
  2242 /** Rotates the specified CPU ready list for threads at the specified priority.
       
  2243 	
       
  2244 	For use by RTOS personality layers to allow external control of round-robin
       
  2245 	scheduling. Not intended for direct use by device drivers.
       
  2246 
       
  2247 	@param	aPriority = priority at which threads should be rotated.
       
  2248 						-1 means use calling thread's priority.
       
  2249 	@param	aCpu		CPU to act on
       
  2250 	
       
  2251 	@pre	Kernel must be unlocked.
       
  2252 	@pre	Call in a thread context.
       
  2253 	
       
  2254 	@post	Kernel is unlocked.
       
  2255  */
       
  2256 
       
  2257 EXPORT_C void NKern::RotateReadyList(TInt aPriority, TInt aCpu)
       
  2258 	{
       
  2259 //	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::RotateReadyList");
       
  2260 //	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::RotateReadyList %d",aPriority));
       
  2261 //	if (aPriority<0 || aPriority>=KNumPriorities)
       
  2262 //		aPriority=NKern::CurrentThread()->iPriority;
       
  2263 //	NKern::Lock();
       
  2264 //	TheScheduler.RotateReadyList(aPriority);
       
  2265 //	NKern::Unlock();
       
  2266 	}
       
  2267 
       
  2268 
       
  2269 /** Rotates the ready list for threads at the specified priority.
       
  2270 	
       
  2271 	For use by RTOS personality layers to allow external control of round-robin
       
  2272 	scheduling. Not intended for direct use by device drivers.
       
  2273 
       
  2274 	@param	aPriority = priority at which threads should be rotated.
       
  2275 						-1 means use calling thread's priority.
       
  2276 	
       
  2277 	@pre	Kernel must be unlocked.
       
  2278 	@pre	Call in a thread context.
       
  2279 	
       
  2280 	@post	Kernel is unlocked.
       
  2281  */
       
  2282 EXPORT_C void NKern::RotateReadyList(TInt aPriority)
       
  2283 	{
       
  2284 	RotateReadyList(aPriority, -1);
       
  2285 	}
       
  2286 
       
  2287 
       
  2288 /** Returns a pointer to the thread group to which the current thread belongs,
       
  2289 	if any.	Returns NULL if current thread is a standalone thread.
       
  2290 	
       
  2291 	@pre	Call in a thread context.
       
  2292  */
       
  2293 EXPORT_C NThreadGroup* NKern::CurrentGroup()
       
  2294 	{
       
  2295 	NThreadBase* pC = NKern::CurrentThread();
       
  2296 	return (pC->iParent == pC) ? (NThreadGroup*)0 : (NThreadGroup*)pC->iParent;
       
  2297 	}
       
  2298 
       
  2299 
       
  2300 /** Detaches the current thread from the group to which it currently belongs,
       
  2301 	if any.	Returns a pointer to the group (NULL if none).
       
  2302 		
       
  2303 	@pre	Call in a thread context.
       
  2304 	@pre	Interrupts enabled
       
  2305 	@pre	Kernel unlocked
       
  2306  */
       
  2307 EXPORT_C NThreadGroup* NKern::LeaveGroup()
       
  2308 	{
       
  2309 	CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_NOT_IDFC|MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED, "NKern::LeaveGroup");
       
  2310 	NKern::Lock();
       
  2311 	TSubScheduler& ss = SubScheduler();
       
  2312 	NThreadBase* pC = ss.iCurrentThread;
       
  2313 	pC->iNewParent = 0;	// cancel any pending Join
       
  2314 	NThreadGroup* g = (pC->iParent == pC) ? (NThreadGroup*)0 : (NThreadGroup*)pC->iParent;
       
  2315 	TBool make_group_ready = FALSE;
       
  2316 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NLeaveGroup %T (%G)",pC,g));
       
  2317 	if (g)
       
  2318 		{
       
  2319 		while (!pC->TiedEventLeaveInterlock())
       
  2320 			{
       
  2321 			TInt irq = NKern::DisableAllInterrupts();
       
  2322 			ss.QueueDfcs();
       
  2323 			NKern::RestoreInterrupts(irq);
       
  2324 			}
       
  2325 		pC->AcqSLock();
       
  2326 		ss.iReadyListLock.LockOnly();
       
  2327 		pC->UnReadyT();
       
  2328 		pC->iParent = pC;
       
  2329 		g->iCurrent = 0;	// since current thread is no longer in g
       
  2330 		ss.AddHead(pC);
       
  2331 		pC->iReady = TUint8(ss.iCpuNum | NSchedulable::EReadyOffset);
       
  2332 		pC->iCpuAffinity = g->iCpuAffinity;	// keep same CPU affinity
       
  2333 		// if we're frozen, the group's freeze count was incremented
       
  2334 		if (pC->iFreezeCpu)
       
  2335 			--g->iFreezeCpu;
       
  2336 		// if we've been marked as deferring, the group's freeze count was incremented
       
  2337 		if (pC->iFastMutexDefer == 1)
       
  2338 			{
       
  2339 			--g->iFreezeCpu;
       
  2340 			pC->iFastMutexDefer = 0;
       
  2341 			}
       
   2342 		// if the group was waiting to change CPU then this thread still needs to change
       
  2343 		if (g->iCpuChange)
       
  2344 			{
       
  2345 			pC->iCpuChange = g->iCpuChange;
       
  2346 			RescheduleNeeded();
       
  2347 			if (!g->iFreezeCpu)
       
  2348 				{
       
  2349 				// we were the last thread in the group stopping it from moving
       
  2350 				// but there may be no other threads left after UnReadyT'ing this one
       
  2351 				g->iCpuChange = FALSE;
       
  2352 				if (g->iReady)
       
  2353 					{
       
  2354 					ss.Remove(g);
       
  2355 					g->iReady = 0;
       
  2356 					make_group_ready = TRUE;
       
  2357 					}
       
  2358 				}
       
  2359 			}
       
  2360 		ss.iReadyListLock.UnlockOnly();
       
  2361 		--g->iThreadCount;
       
  2362 		if (make_group_ready)
       
  2363 			g->ReadyT(0);
       
  2364 		g->RelSLock();		// since pC is no longer attached to g
       
  2365 		pC->RelSLock();
       
  2366 		}
       
  2367 	NKern::Unlock();
       
  2368 	return g;
       
  2369 	}
       
  2370 
       
  2371 
       
  2372 /** Adds the current thread to the specified group.
       
  2373 	
       
  2374 	@param	aGroup = pointer to group to join
       
  2375 	
       
  2376 	@pre	Call in a thread context, not in one of the idle threads.
       
  2377 	@pre	Interrupts enabled
       
  2378 	@pre	Kernel unlocked
       
  2379 	@pre	Thread does not hold a fast mutex
       
  2380 	@pre	Thread does not have a freeze on CPU migration
       
  2381 	@pre	Current thread is not already in a group
       
  2382  */
       
  2383 EXPORT_C void NKern::JoinGroup(NThreadGroup* aGroup)
       
  2384 	{
       
  2385 	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD, "NKern::JoinGroup");
       
  2386 	NKern::Lock();
       
  2387 	TSubScheduler& ss = SubScheduler();
       
  2388 	NThreadBase* pC = ss.iCurrentThread;
       
  2389 	__ASSERT_WITH_MESSAGE_DEBUG(pC->iParent==pC, "Thread not already in a group", "NKern::JoinGroup");
       
  2390 	__ASSERT_WITH_MESSAGE_DEBUG(!pC->iFreezeCpu, "No interdiction on CPU migration", "NKern::JoinGroup");
       
  2391 	__ASSERT_WITH_MESSAGE_DEBUG(!pC->i_NThread_Initial, "Not idle thread", "NKern::JoinGroup");
       
  2392 	__NK_ASSERT_ALWAYS(pC->iParent==pC && !pC->iFreezeCpu);
       
  2393 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NJoinGroup %T->%G",pC,aGroup));
       
  2394 	pC->AcqSLock();
       
  2395 	aGroup->AcqSLock();
       
  2396 	TBool migrate = !CheckCpuAgainstAffinity(ss.iCpuNum, aGroup->iCpuAffinity);	// TRUE if thread's current CPU is incompatible with the group's affinity
       
  2397 	if (!aGroup->iReady || aGroup->iReady==pC->iReady)
       
  2398 		{
       
  2399 		// group not ready or ready on this CPU
       
  2400 		if (!migrate)
       
  2401 			{
       
  2402 			ss.iReadyListLock.LockOnly();
       
  2403 			pC->UnReadyT();
       
  2404 			pC->iParent = aGroup;
       
  2405 			aGroup->iNThreadList.AddHead(pC);
       
  2406 			if (!aGroup->iReady)
       
  2407 				{
       
  2408 				aGroup->iPriority = pC->iPriority;
       
  2409 				ss.AddHead(aGroup);
       
  2410 				aGroup->iReady = TUint8(ss.iCpuNum | NSchedulable::EReadyOffset);
       
  2411 				}
       
  2412 			else if (pC->iPriority > aGroup->iPriority)
       
  2413 				{
       
  2414 				ss.ChangePriority(aGroup, pC->iPriority);
       
  2415 				}
       
  2416 			pC->iReady = NSchedulable::EReadyGroup;
       
  2417 			aGroup->iCurrent = aGroup->iReady;
       
  2418 			ss.iReadyListLock.UnlockOnly();
       
  2419 			++aGroup->iThreadCount;
       
  2420 			goto done;
       
  2421 			}
       
  2422 		}
       
  2423 	// this thread needs to migrate to another CPU
       
  2424 	pC->iNewParent = aGroup;
       
  2425 	RescheduleNeeded();
       
  2426 
       
  2427 	// the following reschedule definitely joins the group even if the
       
  2428 	// thread's CPU affinity is incompatible with that of the group
       
  2429 	// (the thread's CPU affinity is subsequently determined by that of
       
  2430 	// the group)
       
  2431 
       
  2432 done:
       
  2433 	if (pC->iParent != aGroup)
       
  2434 		aGroup->RelSLock();
       
  2435 	pC->RelSLock();
       
  2436 	while (!pC->TiedEventJoinInterlock())
       
  2437 		{
       
  2438 		TInt irq = NKern::DisableAllInterrupts();
       
  2439 		ss.QueueDfcs();
       
  2440 		NKern::RestoreInterrupts(irq);
       
  2441 		}
       
  2442 	NKern::Unlock();
       
  2443 	}
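
/* Example (illustrative): threads in a group are scheduled so that at most
   one of them runs at any time, letting a personality layer emulate a
   single-CPU environment on SMP. TheLegacyGroup and RunLegacyCode() are
   hypothetical.

	NKern::JoinGroup(&TheLegacyGroup);
	RunLegacyCode();				// serialised against other group members
	NThreadGroup* g = NKern::LeaveGroup();	// g == &TheLegacyGroup
*/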
       
  2444 
       
  2445 
       
  2446 /******************************************************************************
       
  2447  * Priority Lists
       
  2448  ******************************************************************************/
       
  2449 
       
  2450 #ifndef __PRI_LIST_MACHINE_CODED__
       
  2451 /** Returns the priority of the highest priority item present on a priority list.
       
  2452 
       
  2453 	@return	The highest priority present or -1 if the list is empty.
       
  2454  */
       
  2455 EXPORT_C TInt TPriListBase::HighestPriority()
       
  2456 	{
       
  2457 //	TUint64 present = MAKE_TUINT64(iPresent[1], iPresent[0]);
       
  2458 //	return __e32_find_ms1_64(present);
       
  2459 	return __e32_find_ms1_64(iPresent64);
       
  2460 	}
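
/* Illustrative: iPresent64 keeps one bit per non-empty priority queue
   (bit p is set by Add() below via iPresent[p>>5] |= 1u<<(p&0x1f)), so the
   highest ready priority is simply the most significant set bit:

	items queued at priorities 3 and 40:
		iPresent64 == (1ull<<40) | (1ull<<3)
		__e32_find_ms1_64(iPresent64) == 40
	empty list:
		__e32_find_ms1_64(0) == -1
*/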
       
  2461 
       
  2462 
       
  2463 /** Finds the highest priority item present on a priority list.
       
  2464 
       
  2465 	If multiple items at the same priority are present, return the first to be
       
  2466 	added in chronological order.
       
  2467 
       
  2468 	@return	A pointer to the item or NULL if the list is empty.
       
  2469  */
       
  2470 EXPORT_C TPriListLink* TPriListBase::First()
       
  2471 	{
       
  2472 	TInt p = HighestPriority();
       
   2473 	return p >= 0 ? static_cast<TPriListLink*>(iQueue[p]) : NULL;
       
  2474 	}
       
  2475 
       
  2476 
       
  2477 /** Adds an item to a priority list at the tail of the queue for its priority.
       
  2478 
       
  2479 	@param aLink A pointer to the item - must not be NULL.
       
  2480  */
       
  2481 EXPORT_C void TPriListBase::Add(TPriListLink* aLink)
       
  2482 	{
       
  2483 	TInt p = aLink->iPriority;
       
  2484 	SDblQueLink* head = iQueue[p];
       
  2485 	if (head)
       
  2486 		{
       
  2487 		// already some at this priority
       
  2488 		aLink->InsertBefore(head);
       
  2489 		}
       
  2490 	else
       
  2491 		{
       
  2492 		// 'create' new list
       
  2493 		iQueue[p] = aLink;
       
  2494 		aLink->iNext = aLink->iPrev = aLink;
       
  2495 		iPresent[p>>5] |= 1u << (p & 0x1f);
       
  2496 		}
       
  2497 	}
       
  2498 
       
  2499 
       
  2500 /** Removes an item from a priority list.
       
  2501 
       
  2502 	@param aLink A pointer to the item - must not be NULL.
       
  2503  */
       
  2504 EXPORT_C void TPriListBase::Remove(TPriListLink* aLink)
       
  2505 	{
       
  2506 	if (!aLink->Alone())
       
  2507 		{
       
  2508 		// not the last on this list
       
  2509 		TInt p = aLink->iPriority;
       
  2510 		if (iQueue[p] == aLink)
       
  2511 			iQueue[p] = aLink->iNext;
       
  2512 		aLink->Deque();
       
  2513 		}
       
  2514 	else
       
  2515 		{
       
  2516 		TInt p = aLink->iPriority;
       
  2517 		iQueue[p] = 0;
       
  2518 		iPresent[p>>5] &= ~(1u << (p & 0x1f));
       
  2519 		KILL_LINK(aLink);
       
  2520 		}
       
  2521 	}
       
/** Changes the priority of an item on a priority list.

	@param	aLink A pointer to the item to act on - must not be NULL.
	@param	aNewPriority A new priority for the item.
 */
EXPORT_C void TPriListBase::ChangePriority(TPriListLink* aLink, TInt aNewPriority)
	{
	if (aLink->iPriority!=aNewPriority)
		{
		Remove(aLink);
		aLink->iPriority=TUint8(aNewPriority);
		Add(aLink);
		}
	}
#endif
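
/* Note, illustrative only: because ChangePriority() re-inserts with Add(),
   the link lands at the tail of the queue for its new priority, behind any
   links already there; a call passing the current priority is a no-op and
   the link keeps its position.
*/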
       
/** Adds an item to a priority list at the head of the queue for its priority.

	@param aLink A pointer to the item - must not be NULL.
 */
EXPORT_C void TPriListBase::AddHead(TPriListLink* aLink)
	{
	TInt p = aLink->iPriority;
	SDblQueLink* head = iQueue[p];
	iQueue[p] = aLink;		// the new link becomes the head for this priority
	if (head)
		{
		// already some at this priority
		aLink->InsertBefore(head);
		}
	else
		{
		// 'create' new list
		aLink->iNext = aLink->iPrev = aLink;
		iPresent[p>>5] |= 1u << (p & 0x1f);
		}
	}
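
/* Note, illustrative only: AddHead() differs from Add() only in pointing
   iQueue[p] unconditionally at the new link, so the link jumps the queue
   and becomes the one returned by First() for that priority.
*/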
       
/******************************************************************************
 * Generic IPIs
 ******************************************************************************/

TGenIPIList::TGenIPIList()
	:	iGenIPILock(TSpinLock::EOrderGenericIPIList)
	{
	}

TGenIPIList GenIPIList;

extern "C" {
extern void send_generic_ipis(TUint32);

void generic_ipi_isr(TSubScheduler* aS)
	{
	TGenericIPI* ipi = aS->iNextIPI;
	if (!ipi)
		return;
	TUint32 m = aS->iCpuMask;
	SDblQueLink* anchor = &GenIPIList.iA;
	while (ipi != anchor)
		{
		__e32_atomic_and_acq32(&ipi->iCpusIn, ~m);	// flag that this CPU has entered the handler
		(*ipi->iFunc)(ipi);
		TInt irq = GenIPIList.iGenIPILock.LockIrqSave();
		TGenericIPI* n = (TGenericIPI*)ipi->iNext;
		ipi->iCpusOut &= ~m;		// flag that this CPU has finished the handler
		if (ipi->iCpusOut == 0)
			{
			// last CPU done - dequeue and zero iNext to signal completion
			ipi->Deque();
			mb();
			ipi->iNext = 0;
			}
		ipi = n;
		// skip to the next IPI which this CPU still has to handle
		while (ipi!=anchor && !(ipi->iCpusIn & m))
			ipi = (TGenericIPI*)ipi->iNext;
		if (ipi == anchor)
			aS->iNextIPI = 0;
		GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
		}
	}
}
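
/* Overview, editorial note: each TGenericIPI carries two CPU masks. A CPU
   clears its bit in iCpusIn on entering the handler, which is what
   WaitEntry() polls, and clears its bit in iCpusOut once the handler has
   run; the last CPU out dequeues the IPI and zeroes iNext, which is what
   WaitCompletion() polls.
*/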
       
void TGenericIPI::Queue(TGenericIPIFn aFunc, TUint32 aCpuMask)
	{
	__KTRACE_OPT(KSCHED2,DEBUGPRINT("GenIPI F=%08x M=%08x", aFunc, aCpuMask));
	iFunc = aFunc;
	TScheduler& s = TheScheduler;
	TInt i;
	TUint32 ipis = 0;
	TInt irq = GenIPIList.iGenIPILock.LockIrqSave();
	if (aCpuMask & 0x80000000u)
		{
		// top bit set selects a symbolic target set rather than a literal mask
		if (aCpuMask==0xffffffffu)
			aCpuMask = s.iActiveCpus2;		// all active CPUs
		else if (aCpuMask==0xfffffffeu)
			aCpuMask = s.iActiveCpus2 &~ SubScheduler().iCpuMask;	// all active CPUs except this one
		else
			aCpuMask = 0;
		}
	iCpusIn = aCpuMask;
	iCpusOut = aCpuMask;
	if (!aCpuMask)
		{
		// nothing to do - mark the IPI complete immediately
		GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
		iNext = 0;
		return;
		}
	GenIPIList.Add(this);
	for (i=0; i<s.iNumCpus; ++i)
		{
		if (!(aCpuMask & (1<<i)))
			continue;
		TSubScheduler& ss = *s.iSub[i];
		if (!ss.iNextIPI)
			{
			// CPU has no IPI pending - interrupt it; otherwise its ISR
			// will pick this one up when it walks the list
			ss.iNextIPI = this;
			ipis |= (1<<i);
			}
		}
	send_generic_ipis(ipis);
	GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
	__KTRACE_OPT(KSCHED2,DEBUGPRINT("GenIPI ipis=%08x", ipis));
	}
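
/* Worked example, illustrative only: on a four-CPU system with all CPUs
   active (iActiveCpus2 == 0x0f), Queue(fn, 0xffffffffu) called from CPU 2
   targets mask 0x0f, while Queue(fn, 0xfffffffeu) targets 0x0b (everyone
   except the caller). A literal mask such as 0x05 (CPUs 0 and 2) is used
   as-is because its top bit is clear; any other value with the top bit
   set targets no CPUs and the IPI completes immediately.
*/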
       
void TGenericIPI::QueueAll(TGenericIPIFn aFunc)
	{
	Queue(aFunc, 0xffffffffu);
	}

void TGenericIPI::QueueAllOther(TGenericIPIFn aFunc)
	{
	Queue(aFunc, 0xfffffffeu);
	}
       
// Call from a thread or IDFC with interrupts enabled
// Spins until every target CPU has entered the IPI handler
void TGenericIPI::WaitEntry()
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_INTERRUPTS_ENABLED,"TGenericIPI::WaitEntry");
	while (iCpusIn)
		{
		__chill();
		}
	mb();
	}

// Call from a thread or IDFC with interrupts enabled
// Spins until every target CPU has finished the handler and the IPI has
// been dequeued (signalled by iNext becoming zero)
void TGenericIPI::WaitCompletion()
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_INTERRUPTS_ENABLED,"TGenericIPI::WaitCompletion");
	volatile TInt* p = (volatile TInt*)&iNext;
	while (*p)
		{
		__chill();
		}
	mb();
	}
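
/* Usage sketch, illustrative only: RunOnAllOtherCpus() is a hypothetical
   helper, and the locking shown is an assumption modelled on TStopIPI
   below rather than a documented requirement:

   @code
   void RunOnAllOtherCpus(TGenericIPIFn aFn)
	   {
	   TGenericIPI ipi;
	   NKern::Lock();
	   ipi.QueueAllOther(aFn);
	   ipi.WaitCompletion();	// interrupts must remain enabled here
	   NKern::Unlock();
	   }
   @endcode
*/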
       
/**	Stops all other CPUs and waits for them all to reach the stop ISR.

	@pre Kernel must be locked.
*/
void TStopIPI::StopCPUs()
	{
	iFlag = 0;
	QueueAllOther(&Isr);	// send IPIs to all other CPUs
	WaitEntry();			// wait for other CPUs to reach the ISR
	}

/**	Allows CPUs stopped by StopCPUs() to proceed, and waits for them to
	finish with this IPI.
*/
void TStopIPI::ReleaseCPUs()
	{
	iFlag = 1;				// allow other CPUs to proceed
	WaitCompletion();		// wait for them to finish with this IPI
	}

void TStopIPI::Isr(TGenericIPI* a)
	{
	TStopIPI* s = (TStopIPI*)a;
	while (!s->iFlag)
		{
		// spin until ReleaseCPUs() sets the flag
		__chill();
		}
	}
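
/* Usage sketch, illustrative only (DoCriticalWork() is hypothetical):

   @code
   TStopIPI stop;
   NKern::Lock();			// precondition for StopCPUs()
   stop.StopCPUs();			// all other CPUs now spin in TStopIPI::Isr()
   DoCriticalWork();		// done while the other CPUs are parked
   stop.ReleaseCPUs();		// lets them leave the ISR, completes the IPI
   NKern::Unlock();
   @endcode

   The parked CPUs spin in ISR context, so the stopped window should be
   kept as short as possible.
*/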
       