kernel/eka/nkern/arm/ncsched.cia
       
     1 // Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
       
     2 // All rights reserved.
       
     3 // This component and the accompanying materials are made available
       
     4 // under the terms of the License "Eclipse Public License v1.0"
       
     5 // which accompanies this distribution, and is available
       
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     7 //
       
     8 // Initial Contributors:
       
     9 // Nokia Corporation - initial contribution.
       
    10 //
       
    11 // Contributors:
       
    12 //
       
    13 // Description:
       
    14 // e32\nkern\arm\ncsched.cia
       
    15 // 
       
    16 //
       
    17 
       
    18 // NThreadBase member data
       
    19 #define __INCLUDE_NTHREADBASE_DEFINES__
       
    20 
       
    21 // TDfc member data
       
    22 #define __INCLUDE_TDFC_DEFINES__
       
    23 
       
    24 #include <e32cia.h>
       
    25 #include <arm.h>
       
    26 #include "highrestimer.h"
       
    27 #include "nkern.h"
       
    28 #include "emievents.h"
       
    29 
       
    30 #if defined(MONITOR_THREAD_CPU_TIME) && !defined(HAS_HIGH_RES_TIMER)
       
    31 #error MONITOR_THREAD_CPU_TIME is defined, but high res timer is not supported
       
    32 #endif
       
    33 
       
    34 #ifdef _DEBUG
       
    35 #define ASM_KILL_LINK(rp,rs)	asm("mov "#rs", #0xdf ");\
       
    36 								asm("orr "#rs", "#rs", "#rs", lsl #8 ");\
       
    37 								asm("orr "#rs", "#rs", "#rs", lsl #16 ");\
       
    38 								asm("str "#rs", ["#rp"] ");\
       
    39 								asm("str "#rs", ["#rp", #4] ");
       
    40 #else
       
    41 #define ASM_KILL_LINK(rp,rs)
       
    42 #endif
       
    43 
       
    44 #define ALIGN_STACK_START			\
       
    45 	asm("mov r12, sp");				\
       
    46 	asm("tst sp, #4");				\
       
    47 	asm("subeq sp, sp, #4");		\
       
    48 	asm("str r12, [sp,#-4]!")
       
    49 
       
    50 #define ALIGN_STACK_END				\
       
    51 	asm("ldr sp, [sp]")
       
    52 
       
    53 
       
    54 #ifdef __CPU_HAS_VFP
       
    55 #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
       
    56 #define	FPEXC_REG	10
       
    57 #define	FPEXC_REG3	4
       
    58 #else
       
    59 #define	FPEXC_REG	11
       
    60 #define	FPEXC_REG3	10
       
    61 #endif
       
    62 #endif
       
    63 
       
    64 //////////////////////////////////////////////////////////////////////////////
       
    65 //	Macros to define which standard ARM registers are used to save 
       
    66 //	required co-processor registers on a reschedule.
       
    67 //	They rely on the fact that the compiler will concatenate adjacent strings
       
    68 //	so "r" "9" "," "r" "10" "," will be converted in the assembler file to:
       
    69 //		r9,r10
       
    70 /////////////////////////////////////////////////////////////////////////////
       
    71 
       
    72 #ifdef __CPU_HAS_CP15_THREAD_ID_REG
       
    73 #define TID_SP_REG(reg)		"r"#reg","
       
    74 #else
       
    75 #define TID_SP_REG(reg)
       
    76 #endif //__CPU_HAS_CP15_THREAD_ID_REG
       
    77 
       
    78 #ifdef __CPU_HAS_VFP
       
    79 #define FPEXC_SP_REG(reg) 	"r"#reg","
       
    80 #else
       
    81 #define FPEXC_SP_REG(reg)
       
    82 #endif //__CPU_HAS_VFP
       
    83 
       
    84 #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
       
    85 #define CAR_SP_REG(reg)		"r"#reg","
       
    86 #else
       
    87 #define CAR_SP_REG(reg)
       
    88 #endif //__CPU_HAS_COPROCESSOR_ACCESS_REG
       
    89 
       
    90 #ifdef __CPU_ARM_USE_DOMAINS
       
    91 #define DACR_SP_REG(reg)	"r"#reg","
       
    92 #else
       
    93 #define DACR_SP_REG(reg)
       
    94 #endif //__CPU_ARM_USE_DOMAINS
       
    95 
       
    96 #ifdef __CPU_SUPPORT_THUMB2EE
       
    97 #define THUMB2EE_SP_REG(reg)	"r"#reg","
       
    98 #else 
       
    99 #define THUMB2EE_SP_REG(reg)
       
   100 #endif  // __CPU_SUPPORT_THUMB2EE
       
   101 
       
   102 //	NOTE THIS WILL PRODUCE A WARNING IF REGISTERS ARE NOT IN ASCENDING ORDER
       
   103 #define EXTRA_STACK_LIST(thumb2ee, tid, fpexc, car, dacr)\
       
   104 THUMB2EE_SP_REG(thumb2ee) TID_SP_REG(tid) FPEXC_SP_REG(fpexc) CAR_SP_REG(car) DACR_SP_REG(dacr)
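// For illustration (assuming all of the optional features above are enabled),
// EXTRA_STACK_LIST(3, 4, 5, 6, 7) expands, via the adjacent-string concatenation
// described above, to:
//		"r3," "r4," "r5," "r6," "r7,"
// which the assembler sees as the register-list fragment r3,r4,r5,r6,r7, ready to
// be embedded in an stmia/ldmia register list inside an asm() string.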
       
   105 
       
   106 //////////////////////////////////////////////////////////////////////////////
       
   107 
       
   108 //#define __DEBUG_BAD_ADDR
       
   109 
       
   110 extern "C" void PanicFastSemaphoreWait();
       
   111 
       
   112 #ifdef __DFC_MACHINE_CODED__
       
   113 
       
   114 __ASSERT_COMPILE(_FOFF(TDfcQue,iPresent) == 0);	
       
   115 __ASSERT_COMPILE(_FOFF(TDfc,iNext) == 0);
       
   116 __ASSERT_COMPILE(_FOFF(TDfc,iPrev) == 4);
       
   117 __ASSERT_COMPILE(_FOFF(TDfc,iPriority) % 4 == 0);	
       
   118 __ASSERT_COMPILE(_FOFF(TDfc,iOnFinalQ) == _FOFF(TDfc,iPriority) + 2);	
       
   119 __ASSERT_COMPILE(_FOFF(TDfc,iQueued) == _FOFF(TDfc,iOnFinalQ) + 1);	
       
   120 
       
   121 __NAKED__ void TDfcQue::ThreadFunction(TAny* /*aDfcQ*/)
       
   122 	{
       
   123 	asm("ldr r11, __TheScheduler2 ");
       
   124 	
       
   125 	asm("mov r4, r0 ");					// r4=aDfcQ
       
   126 	asm("ldr r10, [r11, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
       
   127 	asm("mov r7, #0 ");
       
   128 	asm("mov r9, #1 ");
       
   129 	SET_INTS_1(r5, MODE_SVC, INTS_ALL_ON);
       
   130 	SET_INTS_1(r6, MODE_SVC, INTS_ALL_OFF);
       
   131 
       
   132 	asm("dfc_thrd_fn_check_queue: ");
       
   133 	SET_INTS_2(r5, MODE_SVC, INTS_ALL_ON);	// enable interrupts
       
   134 
       
   135 	asm("dfc_thrd_fn_check_queue2: ");
       
   136 	asm("str r9, [r11, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
       
   137 	asm("ldr r3, [r4, #%a0]" : : "i" _FOFF(TDfcQue,iPresent));			// r3=aDfcQ->iPresent
       
   138 	asm("add lr, r4, #%a0" : :  "i" _FOFF(TDfcQue,iQueue));				// lr=address of priority 0 queue
       
   139 #ifdef __CPU_ARM_HAS_CLZ
       
   140 	CLZ(12,3);							// r12=31-MSB(r3), 32 if r3=0
       
   141 	asm("rsbs r12, r12, #31 ");			// r12=ms bit number set, -1 if queue empty
       
   142 	asm("bmi dfc_thrd_fn_wait ");		// if empty, wait for next request
       
   143 #else
       
   144 	asm("movs r2, r3 ");				// check if queue empty
       
   145 	asm("beq dfc_thrd_fn_wait ");		// if empty, wait for next request
       
   146 	asm("mov r12, #7 ");
       
   147 	asm("cmp r2, #0x10 ");
       
   148 	asm("movcc r2, r2, lsl #4 ");
       
   149 	asm("subcc r12, r12, #4 ");
       
   150 	asm("cmp r2, #0x40 ");
       
   151 	asm("movcc r2, r2, lsl #2 ");
       
   152 	asm("subcc r12, r12, #2 ");
       
   153 	asm("cmp r2, #0x80 ");
       
   154 	asm("subcc r12, r12, #1 ");			// r12=ms bit number set
       
   155 #endif
       
   156 	asm("ldr r8, [lr, r12, lsl #2]! ");	// lr=address of highest priority non-empty queue, r8=address of first DFC
       
   157 	asm("ldmia r8, {r0-r1} ");			// r0=first->next, r1=first->prev
       
   158 	asm("cmp r0, r8 ");					// check if this is the only one at this priority
       
   159 	asm("strne r0, [r1, #0] ");			// if not, prev->next=next
       
   160 	asm("strne r1, [r0, #4] ");			// and next->prev=prev
       
   161 	asm("streq r7, [lr] ");				// if this was only one, set head pointer for this priority to NULL
       
   162 	asm("strne r0, [lr] ");				// else set head pointer to first->next
       
   163 	ASM_KILL_LINK(r8,r1);
       
   164 	asm("strh r7, [r8, #%a0]" : : "i" _FOFF(TDfc, iOnFinalQ));			// iOnFinalQ=iQueued=FALSE - can't touch link pointers after this
       
   165 	asm("biceq r3, r3, r9, lsl r12 ");	// if no more at this priority clear bit in iPresent
       
   166 	asm("streq r3, [r4, #%a0]" : : "i" _FOFF(TDfcQue,iPresent));
       
   167 
       
   168 	SET_INTS_2(r6, MODE_SVC, INTS_ALL_OFF);	// interrupts off
       
   169 	asm("ldr r3, [r11, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));	// check if reschedule required
       
   170 	asm("cmp r3, #0 ");
       
   171 	asm("streq r7, [r11, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// if no reschedule required unlock the kernel
       
   172 	asm("blne  " CSM_ZN10TScheduler10RescheduleEv);	// if reschedule required, do it
       
   173 	SET_INTS_2(r5, MODE_SVC, INTS_ALL_ON);	// restore interrupts
       
   174 
       
   175 	asm("ldr r1, [r8, #%a0]" : : "i" _FOFF(TDfc, iFunction));			// r1=function address
       
   176 	asm("adr lr, dfc_thrd_fn_check_queue2 ");							// set up return address
       
   177 	asm("ldr r0, [r8, #%a0]" : : "i" _FOFF(TDfc, iPtr));				// r0=DFC argument
       
   178 	__JUMP(,r1);						// call DFC
       
   179 
       
   180 	asm("dfc_thrd_fn_wait: ");
       
   181 	asm("mov r0, #%a0" : : "i" ((TInt)NThreadBase::EWaitDfc));
       
   182 	asm("strb r0, [r10, #%a0]" : : "i" _FOFF(NThreadBase,iNState));
       
   183 	asm("strb r9, [r11, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
       
   184 	asm("mov r0, r11 ");
       
   185 	asm("mov r1, r10 ");
       
   186 	asm("bl unready ");
       
   187 	asm("adr lr, dfc_thrd_fn_check_queue ");	// set up return address
       
   188 	asm("b  " CSM_ZN10TScheduler10RescheduleEv);
       
   189 	
       
   190 	asm("__TheScheduler2: ");
       
   191 	asm(".word TheScheduler ");
       
   192 	}
       
   193 
       
   194 
       
   195 /** Cancels an IDFC or DFC.
       
   196 
       
   197 	This function does nothing if the IDFC or DFC is not queued.
       
   198 
       
   199 	@return	TRUE	if the DFC was actually dequeued by this call. In that case
       
   200 					it is guaranteed that the DFC will not execute until it is
       
   201 					queued again.
       
   202 			FALSE	if the DFC was not queued on entry to the call, or was in
       
   203 					the process of being executed or cancelled. In this case
       
   204 					it is possible that the DFC executes after this call
       
   205 					returns.
       
   206 
       
    207 	@post	However, in either case it is safe to delete the DFC object on
       
   208 			return from this call provided only that the DFC function does not
       
   209 			refer to the DFC object itself.
       
   210 	
       
   211 	@pre IDFC or thread context. Do not call from ISRs.
       
   212 
       
   213 	@pre If the DFC function accesses the DFC object itself, the user must ensure that
       
   214 	     Cancel() cannot be called while the DFC function is running.
       
   215  */
       
   216 __NAKED__ EXPORT_C TBool TDfc::Cancel()
       
   217 	{
       
   218 	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
       
   219 
       
   220 	asm("ldr r1, __TheScheduler2 ");
       
   221 	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
       
   222 	asm("add r3, r3, #1 ");
       
   223 	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
       
   224 	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TDfc,iPriority));			// r2=priority/flags
       
   225 	SET_INTS_1(r12, MODE_SVC, INTS_ALL_OFF);
       
   226 	asm("tst r2, #0xff000000 ");		// test queued flag
       
   227 	asm("moveq r0, #0 ");				// if not queued, return FALSE
       
   228 	asm("beq 0f ");
       
   229 	SET_INTS_2(r12, MODE_SVC, INTS_ALL_OFF);	// otherwise disable interrupts while we dequeue
       
   230 	asm("ldmia r0, {r3,r12} ");			// r3=next, r12=prev
       
   231 	SET_INTS_1(r1, MODE_SVC, INTS_ALL_ON);
       
   232 	asm("str r3, [r12, #0] ");			// prev->next=next
       
   233 	asm("str r12, [r3, #4] ");			// next->prev=prev
       
   234 	SET_INTS_2(r1, MODE_SVC, INTS_ALL_ON);	// reenable interrupts
       
   235 	asm("tst r2, #0x00ff0000 ");		// check iOnFinalQ
       
   236 	asm("beq 1f ");						// if FALSE, finish up
       
   237 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ));				// r1=iDfcQ
       
   238 	asm("and r2, r2, #0xff ");			// r2=iPriority
       
   239 	asm("subs r12, r3, r0 ");			// check if queue is now empty, r12=0 if it is
       
   240 	asm("beq 2f ");						// branch if now empty
       
    241 	asm("add r1, r1, r2, lsl #2 ");		// r1=&iDfcQ->iQueue[iPriority]-_FOFF(TDfcQue,iQueue)
       
   242 	asm("ldr r12, [r1, #%a0]" : : "i" _FOFF(TDfcQue,iQueue));			// r12=iDfcQ->iQueue[iPriority]
       
   243 	asm("cmp r12, r0 ");				// is this one first?
       
   244 	asm("streq r3, [r1, #%a0]" : : "i" _FOFF(TDfcQue,iQueue));			// if so, iQueue[pri]=next
       
   245 	asm("b 1f ");
       
   246 	asm("2: ");		// r0=this, r1=iDfcQ, r2=priority, r3=next, r12=0
       
   247 	asm("ldr r3, [r1], #%a0" : : "i" _FOFF(TDfcQue,iQueue));			// r3=iDfcQ->iPresent, r1=&iDfcQ->iQueue[0]
       
   248 	asm("str r12, [r1, r2, lsl #2] ");	// iDfcQ->iQueue[iPriority]=NULL
       
   249 	asm("mov r12, #1 ");
       
   250 	asm("bic r3, r3, r12, lsl r2 ");	// clear present bit
       
   251 	asm("str r3, [r1, #-%a0]" : : "i" _FOFF(TDfcQue,iQueue));
       
   252 	asm("1: ");
       
   253 	ASM_KILL_LINK(r0,r1);
       
   254 	asm("mov r3, #0 ");
       
   255 	asm("strh r3, [r0, #%a0]" : : "i" _FOFF(TDfc,iOnFinalQ));			// iOnFinalQ=iQueued=FALSE - must be done last
       
   256 
       
   257 	// R0=this != 0 here
       
   258 
       
   259 	asm("0: ");
       
   260 	asm("stmfd sp!, {r0,lr} ");
       
   261 	asm("bl  " CSM_ZN5NKern6UnlockEv);			// unlock the kernel
       
   262 	__POPRET("r0,");
       
   263 	}
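// Illustrative usage sketch (not part of the kernel sources): a driver cancelling
// its DFC before tearing down, relying on the guarantees documented above. The
// DMyDriver/iDfc names are hypothetical.
//
//	void DMyDriver::Shutdown()
//		{
//		TBool dequeued = iDfc.Cancel();	// TRUE => the DFC cannot run again until re-queued
//		(void)dequeued;
//		// In either case the TDfc may now be destroyed, provided the DFC function
//		// does not refer to the TDfc object itself.
//		}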
       
   264 #endif
       
   265 
       
   266 #ifdef __FAST_SEM_MACHINE_CODED__
       
   267 /** Waits on a fast semaphore.
       
   268 
       
   269     Decrements the signal count for the semaphore and
       
    270 	removes the calling thread from the ready-list if the semaphore becomes
       
   271 	unsignalled. Only the thread that owns a fast semaphore can wait on it.
       
   272 	
       
    273 	Note that this function does not block; it merely updates the NThread state.

    274 	Rescheduling will only occur when the kernel is unlocked. Generally threads
       
   275 	would use NKern::FSWait() which manipulates the kernel lock for you.
       
   276 
       
   277 	@pre The calling thread must own the semaphore.
       
   278 	@pre Kernel must be locked.
       
   279 	@pre No fast mutex can be held.
       
   280 	
       
   281 	@post Kernel is locked.
       
   282 	
       
   283 	@see NFastSemaphore::Signal()
       
   284 	@see NKern::FSWait()
       
   285 	@see NKern::Unlock()
       
   286  */
       
   287 EXPORT_C __NAKED__ void NFastSemaphore::Wait()
       
   288 	{
       
   289 	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC|MASK_NO_FAST_MUTEX);
       
   290 
       
   291 	asm("mov r2, r0 ");
       
   292 	asm("ldr r0, __TheScheduler ");
       
   293 	asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread));	// r1=owning thread
       
   294 	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));		// r3=current thread
       
   295 	asm("cmp r1, r3 ");
       
   296 	asm("bne PanicFastSemaphoreWait ");		// if wrong thread, fault
       
    297 	// wait on an NFastSemaphore pointed to by r2
       
   298 	// enter with r0=&TheScheduler, r1=the current thread, already validated
       
   299 	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
       
   300 	asm("mov r12, #%a0" : : "i" (NThread::EWaitFastSemaphore));
       
   301 	asm("subs r3, r3, #1 ");
       
   302 	asm("str r3, [r2, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));	// decrement iCount
       
   303 	__JUMP(ge,lr);							// if result>=0, finished
       
   304 	asm("str r2, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
       
   305 	asm("strb r12, [r1, #%a0]" : : "i" _FOFF(NThread,iNState));
       
   306 	asm("mov r3, #1 ");
       
   307 	asm("strb r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
       
   308 
       
   309 	// remove thread from ready list
       
   310 	asm("b unready ");
       
   311 	}
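// Illustrative sketch of the calling convention described above (iRequestSem is a
// hypothetical NFastSemaphore owned by the calling thread). Wait() itself requires
// the kernel to be locked; the reschedule only happens once the kernel is unlocked
// again, which is what NKern::FSWait() packages up for you:
//
//	NKern::Lock();				// kernel locked, no fast mutex held
//	iRequestSem.Wait();			// marks this thread as waiting if the count goes negative
//	NKern::Unlock();			// any required reschedule takes place here
//
// Normal code should simply call NKern::FSWait(&iRequestSem).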
       
   312 
       
   313 
       
   314 /** Waits for a signal on the current thread's I/O semaphore.
       
   315  @pre   No fast mutex can be held.
       
   316  @pre   Kernel must be unlocked.
       
   317  @pre	Call in a thread context.
       
   318  @pre	Interrupts must be enabled.
       
   319  */
       
   320 EXPORT_C __NAKED__ void NKern::WaitForAnyRequest()
       
   321 	{
       
   322 	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC|MASK_NO_FAST_MUTEX);
       
   323 
       
   324 	asm("ldr r0, __TheScheduler ");
       
   325 	asm("str lr, [sp, #-4]! ");				// save lr
       
   326 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
       
   327 	asm("bl wait_for_any_request2 ");
       
   328 	SET_INTS(r0, MODE_SVC, INTS_ALL_ON);	// turn interrupts back on
       
   329 	asm("ldr pc, [sp], #4 ");
       
   330 
       
   331 	// Special case handler for Exec::WaitForAnyRequest() for efficiency reasons
       
   332 	// Called from __ArmVectorSwi with R11=&TheScheduler, R1=current thread
       
   333 	// Returns with interrupts disabled
       
   334 	asm(".global wait_for_any_request ");
       
   335 	asm("wait_for_any_request: ");
       
   336 
       
   337 	ASM_DEBUG0(WaitForAnyRequest);
       
   338 	asm("mov r0, r11 ");
       
   339 	asm("wait_for_any_request2: ");
       
   340 	SET_INTS_1(r2, MODE_SVC, INTS_ALL_OFF);
       
   341 #ifdef _DEBUG
       
   342 	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
       
   343 	asm("cmp r3, #0 ");
       
   344 	asm("movne r12, #0xd8000001 ");			// FAULT - calling Exec::WaitForAnyRequest() with the kernel locked is silly
       
   345 	asm("strne r12, [r12] ");
       
   346 #endif
       
   347 	SET_INTS_2(r2, MODE_SVC, INTS_ALL_OFF);	// turn off interrupts
       
   348 	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iRequestSemaphore.iCount));
       
   349 	asm("mov r3, #1 ");
       
   350 	SET_INTS_1(r12, MODE_SVC, INTS_ALL_ON);
       
   351 	asm("subs r2, r2, #1 ");
       
   352 	asm("str r2, [r1, #%a0]" : : "i" _FOFF(NThread,iRequestSemaphore.iCount));	// decrement iCount
       
   353 	__JUMP(ge,lr);							// if result non-negative, finished
       
   354 
       
   355 	asm("str r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
       
   356 	SET_INTS_2(r12, MODE_SVC, INTS_ALL_ON);	// reenable interrupts
       
   357 	asm("strb r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
       
   358 
       
   359 	// r2 points to NFastSemaphore
       
   360 	asm("add r2, r1, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore));
       
   361 	asm("str lr, [sp, #-4]! ");
       
   362 	asm("str r2, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
       
   363 	asm("mov r3, #%a0" : : "i" (NThread::EWaitFastSemaphore));
       
   364 	asm("strb r3, [r1, #%a0]" : : "i" _FOFF(NThread,iNState));	// mark thread waiting on semaphore
       
   365 	asm("bl unready ");						// remove thread from ready list - DOESN'T CLOBBER R0
       
   366 	asm("bl  " CSM_ZN10TScheduler10RescheduleEv);		// Reschedule
       
   367 	asm("ldr lr, [sp], #4 ");
       
   368 	asm("mov r3, #%a0 " : : "i" (NThread::EContextWFARCallback));
       
   369 	asm("b callUserModeCallbacks ");		// exit and call callbacks
       
   370 	}
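// Illustrative sketch (hypothetical kernel-side worker thread): sleeping on the
// thread's own request semaphore until another thread signals it. The signalling
// side would use the matching request-signal API (for example
// NKern::ThreadRequestSignal(); assumed here, not defined in this file).
//
//	for (;;)
//		{
//		NKern::WaitForAnyRequest();		// block until the I/O semaphore is signalled
//		ProcessOutstandingWork();		// hypothetical
//		}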
       
   371 
       
   372 
       
   373 /** Signals a fast semaphore multiple times.
       
   374 
       
   375 	@pre Kernel must be locked.
       
   376 	@pre Call either in a thread or an IDFC context.
       
   377 	
       
   378 	@post Kernel is locked.
       
   379 
       
   380 	@internalComponent	
       
   381  */
       
   382 EXPORT_C __NAKED__ void NFastSemaphore::SignalN(TInt /*aCount*/)
       
   383 	{
       
   384 	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);
       
   385 
       
   386 	asm("req_sem_signaln: ");
       
   387 	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
       
   388 	asm("adds r2, r2, r1 ");
       
   389 	asm("str r2, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
       
   390 	__JUMP(cc,lr);							// if count did not cross 0 nothing more to do
       
   391 	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread));
       
   392 	asm("mov r1, #0 ");
       
   393 	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
       
   394 	asm("b check_suspend_then_ready ");
       
   395 	}
       
   396 
       
   397 /** @internalComponent */
       
   398 __NAKED__ void NFastSemaphore::WaitCancel()
       
   399 	{
       
   400 	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread));
       
   401 	asm("mov r1, #0 ");
       
   402 	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
       
   403 	asm("str r1, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
       
   404 	asm("mov r0, r3 ");
       
   405 	asm("b check_suspend_then_ready ");
       
   406 	}
       
   407 
       
   408 
       
   409 /** Resets a fast semaphore.
       
   410 
       
   411 	@pre Kernel must be locked.
       
   412 	@pre Call either in a thread or an IDFC context.
       
   413 	
       
   414 	@post Kernel is locked.
       
   415 
       
   416 	@internalComponent	
       
   417  */
       
   418 EXPORT_C __NAKED__ void NFastSemaphore::Reset()
       
   419 	{
       
   420 	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);
       
   421 
       
   422 	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
       
   423 	asm("mov r1, #0 ");
       
   424 	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
       
   425 	asm("cmp r2, #0 ");
       
   426 	__JUMP(ge,lr);					// if count was not negative, nothing to do
       
   427 	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread));
       
   428 	asm("mov r1, #0 ");
       
   429 	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
       
   430 	asm("b check_suspend_then_ready ");
       
   431 	}
       
   432 
       
   433 #endif
       
   434 
       
   435 #ifdef __SCHEDULER_MACHINE_CODED__
       
   436 
       
   437 __ASSERT_COMPILE(_FOFF(SDblQueLink,iNext) == 0);
       
   438 __ASSERT_COMPILE(_FOFF(SDblQueLink,iPrev) == 4);
       
   439 __ASSERT_COMPILE(_FOFF(TScheduler,iPresent) == 0);
       
   440 __ASSERT_COMPILE(_FOFF(NFastSemaphore,iCount) == 0);
       
   441 __ASSERT_COMPILE(_FOFF(NFastSemaphore,iOwningThread) == 4);
       
   442 __ASSERT_COMPILE(_FOFF(TDfc,iPtr) == _FOFF(TDfc,iPriority) + 4);
       
   443 __ASSERT_COMPILE(_FOFF(TDfc,iFunction) == _FOFF(TDfc,iPtr) + 4);
       
   444 
       
   445 __NAKED__ void TScheduler::Remove(NThreadBase* /*aThread*/)
       
   446 //
       
   447 // Remove a thread from the ready list
       
   448 //
       
   449 	{
       
   450 	asm("unready: ");
       
   451 #ifdef _DEBUG
       
   452 	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
       
   453 	asm("mov r12, #0xd8000003 ");
       
   454 	asm("cmp r2, #0 ");
       
   455 	asm("strne r12, [r12] ");				// crash if fast mutex held
       
   456 #endif
       
   457 	asm("ldr r12, [r1, #%a0]" : : "i" _FOFF(NThread,iTimeslice));
       
   458 	asm("ldmia r1, {r2,r3} ");				// r2=next, r3=prev
       
   459 	asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iTime));	// fresh timeslice for next time
       
   460 
       
   461 	asm("pri_list_remove: ");
       
   462 	ASM_KILL_LINK(r1,r12);
       
   463 	asm("subs r12, r1, r2 ");				// check if more threads at this priority, r12=0 if not
       
   464 	asm("bne unready_1 ");					// branch if there are more at same priority
       
   465 	asm("ldrb r2, [r1, #%a0]" : : "i" _FOFF(NThread, iPriority));	// r2=thread priority
       
   466 	asm("add r1, r0, #%a0" : : "i" _FOFF(TScheduler, iQueue));		// r1->iQueue[0]
       
   467 	asm("str r12, [r1, r2, lsl #2] ");		// iQueue[priority]=NULL
       
   468 	asm("ldrb r1, [r0, r2, lsr #3] ");		// r1=relevant byte in present mask
       
   469 	asm("and r3, r2, #7 ");					// r3=priority & 7
       
   470 	asm("mov r12, #1 ");
       
   471 	asm("bic r1, r1, r12, lsl r3 ");		// clear bit in present mask
       
   472 	asm("strb r1, [r0, r2, lsr #3] ");		// update relevant byte in present mask
       
   473 	__JUMP(,lr);
       
   474 	asm("unready_1: ");						// get here if there are other threads at same priority
       
   475 	asm("ldrb r12, [r1, #%a0]" : : "i" _FOFF(NThread, iPriority));	// r12=thread priority
       
   476 	asm("add r0, r0, #%a0" : : "i" _FOFF(TScheduler, iQueue));		// r0=&iQueue[0]
       
   477 	asm("str r3, [r2, #4] ");				// next->prev=prev
       
   478 	asm("ldr r12, [r0, r12, lsl #2]! ");	// r12=iQueue[priority], r0=&iQueue[priority]
       
   479 	asm("str r2, [r3, #0] ");				// and prev->next=next
       
   480 	asm("cmp r12, r1 ");					// if aThread was first...
       
   481 	asm("streq r2, [r0, #0] ");				// iQueue[priority]=aThread->next
       
   482 	__JUMP(,lr);							// finished
       
   483 	}
       
   484 
       
   485 
       
   486 /** Removes an item from a priority list.
       
   487 
       
   488 	@param aLink A pointer to the item - this must not be NULL.
       
   489  */
       
   490 EXPORT_C __NAKED__ void TPriListBase::Remove(TPriListLink* /*aLink*/)
       
   491 	{
       
   492 	asm("ldmia r1, {r2,r3} ");				// r2=aLink->iNext, r3=aLink->iPrev
       
   493 	asm("b pri_list_remove ");
       
   494 	}
       
   495 
       
   496 
       
   497 /** Signals a fast semaphore.
       
   498 
       
   499     Increments the signal count of a fast semaphore by
       
    500 	one and releases any waiting thread if the semaphore becomes signalled.
       
   501 	
       
    502 	Note that a reschedule will not occur before this function returns; this will
       
   503 	only take place when the kernel is unlocked. Generally threads
       
   504 	would use NKern::FSSignal() which manipulates the kernel lock for you.
       
   505 	
       
   506 	@pre Kernel must be locked.
       
   507     @pre Call either in a thread or an IDFC context.
       
   508 
       
   509 	@post Kernel is locked.
       
   510 	
       
   511 	@see NFastSemaphore::Wait()
       
   512 	@see NKern::FSSignal()
       
   513 	@see NKern::Unlock()
       
   514  */
       
   515 EXPORT_C __NAKED__ void NFastSemaphore::Signal()
       
   516 	{
       
   517 	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);
       
   518 
       
   519 	asm("req_sem_signal: ");
       
   520 	asm("ldmia r0, {r1,r2} ");				// r1=iCount, r2=iOwningThread
       
   521 	asm("mov r3, #0 ");
       
   522 	asm("adds r1, r1, #1 ");
       
   523 	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
       
   524 	__JUMP(gt,lr);							// if count after incrementing is >0, nothing more to do
       
   525 	asm("mov r0, r2 ");
       
   526 	asm("str r3, [r0, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
       
   527 
       
   528 	// fall through to NThreadBase::CheckSuspendThenReady()
       
   529 	}
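// Illustrative sketch matching the preconditions above (iRequestSem is a
// hypothetical NFastSemaphore): Signal() needs the kernel locked, so plain thread
// code would normally call NKern::FSSignal(), which does the locking itself.
//
//	NKern::Lock();
//	iRequestSem.Signal();		// wakes the owning thread if it was waiting
//	NKern::Unlock();			// reschedule, if needed, happens here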
       
   530 
       
   531 
       
   532 /** Makes a nanothread ready provided that it is not explicitly suspended.
       
   533 	
       
   534 	For use by RTOS personality layers.
       
   535 
       
   536 	@pre	Kernel must be locked.
       
   537 	@pre	Call either in a thread or an IDFC context.
       
   538 	
       
   539 	@post	Kernel is locked.
       
   540  */
       
   541 EXPORT_C __NAKED__ void NThreadBase::CheckSuspendThenReady()
       
   542 	{
       
   543 	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);
       
   544 
       
   545 	asm("check_suspend_then_ready: ");
       
   546 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NThread,iSuspendCount));
       
   547 	asm("mov r2, #%a0" : : "i" (NThread::ESuspended));
       
   548 	asm("cmp r1, #0 ");
       
   549 	asm("bne mark_thread_suspended ");		// branch out if suspend count nonzero
       
   550 
       
   551 	// fall through to NThreadBase::Ready()
       
   552 	}
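// Illustrative sketch (hypothetical RTOS personality layer): waking one of its
// nanothreads from an IDFC, where the kernel is already locked, while honouring
// any outstanding explicit suspension.
//
//	void PWakeThread(NThreadBase* aT)		// runs in IDFC context, kernel locked
//		{
//		aT->CheckSuspendThenReady();		// made ready only if iSuspendCount is zero
//		}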
       
   553 
       
   554 
       
   555 /** Makes a nanothread ready.
       
   556 	
       
   557 	For use by RTOS personality layers.
       
   558 
       
   559 	@pre	Kernel must be locked.
       
   560 	@pre	Call either in a thread or an IDFC context.
       
   561 	@pre	The calling thread must not be explicitly suspended.
       
   562 	
       
   563 	@post	Kernel is locked.
       
   564  */
       
   565 EXPORT_C __NAKED__ void NThreadBase::Ready()
       
   566 	{
       
   567 // on release builds just fall through to DoReady
       
   568 #ifdef _DEBUG
       
   569 	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_KERNEL_LOCKED);
       
   570 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NThreadBase,iSuspendCount));
       
   571 	asm("cmp r1, #0 ");
       
   572 	asm("beq 1f ");
       
   573 	ASM_CHECK_PRECONDITIONS(MASK_ALWAYS_FAIL);
       
   574 	asm("1: ");
       
   575 	asm("stmfd sp!, {r0,lr} ");
       
   576 	asm("mov r0, #%a0" : : "i" ((TInt)KCRAZYSCHEDDELAY));
       
   577 	asm("bl " CSM_Z9KDebugNumi );
       
   578 	asm("cmp r0, #0 ");						// Z=1 => no delayed scheduler
       
   579 	asm("ldmfd sp!, {r0,lr} ");
       
   580 	asm("ldr r1, __TheScheduler ");
       
   581 	asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iPriority));	// r2=priority of aThread
       
   582 	asm("beq DoReadyInner ");				// delayed scheduler is disabled
       
   583 	asm("ldr r12, __TheTimerQ ");
       
   584 	asm("cmp r2, #0 ");
       
   585 	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(NTimerQ,iMsCount));
       
   586 	asm("cmpne r12, #0 ");					// tick hasn't happened yet or this is priority 0
       
   587 	asm("beq DoReadyInner ");				// so ready it as usual
       
   588 	asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,i_ThrdAttr));
       
   589 	asm("tst r2, #%a0 " : : "i" ((TInt)KThreadAttDelayed));
       
   590 	__JUMP(ne,lr);							// thread is already on the delayed queue
       
   591 	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iDelayedQ));
       
   592 	asm("ldr r12, [r3, #4] ");				// r12->last thread
       
   593 	asm("str r0, [r3, #4] ");				// first->prev=this
       
   594 	asm("str r0, [r12, #0] ");				// old last->next=this
       
   595 	asm("stmia r0, {r3,r12} ");				// this->next=first, this->prev=old last
       
   596 	asm("orr r2, r2, #%a0 " : : "i" ((TInt)KThreadAttDelayed));
       
   597 	asm("strb r2, [r0, #%a0]" : : "i" _FOFF(NThread,i_ThrdAttr));
       
   598 	__JUMP(,lr);
       
   599 
       
   600 	asm("__TheTimerQ: ");
       
   601 	asm(".word TheTimerQ ");
       
   602 	asm("__SuperPageAddress: ");
       
   603 	asm(".word SuperPageAddress ");
       
   604 #endif
       
   605 // on release builds just fall through to DoReady
       
   606 	}
       
   607 
       
   608 __NAKED__ void NThreadBase::DoReady()
       
   609 	{
       
   610 	asm("ldr r1, __TheScheduler ");
       
   611 	asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iPriority));	// r2=priority of aThread
       
   612 	asm("DoReadyInner: ");
       
   613 	asm("mov r3, #%a0" : : "i" (NThread::EReady));
       
   614 	asm("strb r3, [r0, #%a0]" : : "i" _FOFF(NThread,iNState));
       
   615 	asm("ldmia r1!, {r3,r12} ");			// r3=present mask low, r12=present mask high, r1=&iQueue[0]
       
   616 	asm("cmp r2, #31 ");
       
   617 	asm("bhi 1f ");
       
   618 	asm("cmp r12, #0 ");
       
   619 	asm("mov r12, r3 ");
       
   620 	asm("mov r3, #1 ");
       
   621 	asm("bne 2f ");							// branch if high word set, so this has lower priority
       
   622 	asm("cmp r3, r12, lsr r2 ");			// see if new thread may cause reschedule (CS if so, EQ if equal priority)
       
   623 	asm("beq 3f ");							// branch if equality case (no need to update bitmask)
       
   624 	asm("strhib r3, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iRescheduleNeededFlag)-8)); // set reschedule flag if necessary
       
   625 	asm("2: ");
       
   626 	asm("tst r12, r3, lsl r2 ");			// test bit in present mask
       
   627 	asm("orreq r12, r12, r3, lsl r2 ");		// if clear, set it ...
       
   628 	asm("ldrne r3, [r1, r2, lsl #2] ");		// if not alone, r3->first thread on queue
       
   629 	asm("streq r12, [r1, #-8] ");			// ... and update present mask low word
       
   630 	asm("bne 4f ");							// branch if not alone (don't need to touch bitmask)
       
   631 	asm("6: ");	// get here if thread is alone at this priority
       
   632 	asm("str r0, [r1, r2, lsl #2] ");		// thread is alone at this priority, so point queue to it
       
   633 	asm("str r0, [r0, #0] ");				// next=prev=this
       
   634 	asm("str r0, [r0, #4] ");
       
   635 	__JUMP(,lr);							// NOTE: R0=this != 0
       
   636 	asm("5: "); // get here if this thread has joint highest priority >= 32
       
   637 	asm("add r2, r2, #32 ");				// restore thread priority
       
   638 	asm("3: ");	// get here if this thread has joint highest priority < 32
       
   639 	asm("ldr r3, [r1, r2, lsl #2] ");		// r3->first thread on queue
       
   640 	asm("ldr r12, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iTime));	// r12=first thread->time remaining
       
   641 	asm("subs r12, r12, #1 ");				// timeslice expired? if so, r12=-1 and C=0 else C=1
       
   642 	asm("strccb r12, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iRescheduleNeededFlag)-8)); // set reschedule flag if necessary
       
   643 	asm("4: ");	// get here when adding to non-empty queue; r1->queue, r3->first thread on queue
       
   644 	asm("ldr r12, [r3, #4] ");				// r12->last thread
       
   645 	asm("str r0, [r3, #4] ");				// first->prev=this
       
   646 	asm("str r0, [r12, #0] ");				// old last->next=this
       
   647 	asm("stmia r0, {r3,r12} ");				// this->next=first, this->prev=old last
       
   648 	__JUMP(,lr);							// NOTE: R0=this != 0
       
   649 	asm("1: ");	// get here if this thread priority > 31
       
   650 	asm("and r2, r2, #31 ");
       
   651 	asm("mov r3, #1 ");
       
   652 	asm("cmp r3, r12, lsr r2 ");			// see if new thread may cause reschedule (CS if so, EQ if equal priority)
       
   653 	asm("beq 5b ");							// branch if equality case (no need to update bitmask)
       
   654 	asm("strhib r3, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iRescheduleNeededFlag)-8)); // set reschedule flag if necessary
       
   655 	asm("tst r12, r3, lsl r2 ");			// test bit in present mask
       
   656 	asm("orreq r12, r12, r3, lsl r2 ");		// if clear, set it ...
       
   657 	asm("add r2, r2, #32 ");
       
   658 	asm("streq r12, [r1, #-4] ");			// ... and update present mask high word
       
   659 	asm("beq 6b ");							// branch if alone
       
   660 	asm("ldr r3, [r1, r2, lsl #2] ");		// if not alone, r3->first thread on queue
       
   661 	asm("b 4b ");							// branch if not alone (don't need to touch bitmask)
       
   662 
       
   663 	asm("mark_thread_suspended: ");			// continuation of CheckSuspendThenReady in unusual case
       
   664 	asm("strb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iNState));	// set state to suspended
       
   665 	__JUMP(,lr);							// NOTE: R0=this != 0
       
   666 	}
       
   667 
       
   668 __NAKED__ void TScheduler::QueueDfcs()
       
   669 	{
       
   670 	// move DFCs from pending queue to their final queues
       
   671 	// enter with interrupts off and kernel locked
       
   672 	// leave with interrupts off and kernel locked
       
   673 	// NOTE: WE MUST NOT CLOBBER R0 OR R2!
       
   674 	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
       
   675 
       
   676 
       
   677 	SET_INTS(r1, MODE_SVC, INTS_ALL_ON);	// enable interrupts
       
   678 #ifdef __CPU_ARM_HAS_CPS
       
   679 	asm("mov r1, #1 ");						// (not necessary on ARMV5 as SET_INTS above leaves r1 == 0x13)
       
   680 #endif
       
   681 	asm("strb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iInIDFC));
       
   682 	asm("stmfd sp!, {r2,r5,r11,lr} ");		// save registers
       
   683 
       
   684 #ifdef BTRACE_CPU_USAGE
       
   685 	asm("ldrb r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
       
   686 	asm("add r5, r0, #%a0" : : "i" _FOFF(TScheduler,iDfcs));
       
   687 	asm("mov r11, sp ");					// r11 points to saved registers
       
   688 	asm("cmp r1, #0");
       
   689 	asm("blne idfc_start_trace");
       
   690 #else
       
   691 	asm("add r5, r0, #%a0" : : "i" _FOFF(TScheduler,iDfcs));
       
   692 	asm("mov r11, sp ");					// r11 points to saved registers
       
   693 #endif
       
   694 
       
   695 	asm("queue_dfcs_1: ");
       
   696 	SET_INTS(r0, MODE_SVC, INTS_ALL_OFF);	// disable interrupts
       
   697 	asm("ldr r0, [r5, #0] ");				// r0 points to first pending DFC
       
   698 	SET_INTS_1(r1, MODE_SVC, INTS_ALL_ON);
       
   699 	asm("subs r2, r0, r5 ");				// check if queue empty
       
   700 	asm("ldrne r3, [r0, #0] ");				// r3 points to next DFC
       
   701 	asm("beq queue_dfcs_0 ");				// if so, exit
       
   702 	asm("str r3, [r5, #0] ");				// next one is now first
       
   703 	asm("str r5, [r3, #4] ");				// next->prev=queue head
       
   704 	SET_INTS_2(r1, MODE_SVC, INTS_ALL_ON);	// enable interrupts
       
   705 	
       
   706 	asm("ldrb r12, [r0, #%a0]" : : "i" _FOFF(TDfc,iPriority));			// r12=iPriority
       
   707 	asm("adr lr, queue_dfcs_1 ");			// return to queue_dfcs_1
       
   708 	asm("cmp r12, #%a0" : : "i" ((TInt)KNumDfcPriorities));	// check for immediate DFC
       
   709 	asm("bcs do_immediate_dfc ");
       
   710 
       
   711 	// enqueue the DFC and signal the DFC thread
       
   712 	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ));				// r2=iDfcQ
       
   713 	asm("mov r3, #1 ");
       
   714 	asm("dfc_enque_1: ");
       
   715 	asm("ldr r1, [r2], #%a0" : : "i" _FOFF(TDfcQue,iQueue));			// r1=present mask, r2 points to first queue
       
   716 	asm("strb r3, [r0, #%a0]" : : "i" _FOFF(TDfc,iOnFinalQ));			// set flag to show DFC on final queue
       
   717 	asm("tst r1, r3, lsl r12 ");			// test bit in present mask
       
   718 	asm("ldrne r1, [r2, r12, lsl #2] ");	// if not originally empty, r1->first
       
   719 	asm("orreq r1, r1, r3, lsl r12 ");		// if bit clear, set it
       
   720 	asm("streq r1, [r2, #%a0]" : : "i" (_FOFF(TDfcQue,iPresent)-_FOFF(TDfcQue,iQueue)));	// if bit originally clear update present mask
       
   721 	asm("ldrne r3, [r1, #4] ");				// if not originally empty, r3->last
       
   722 	asm("streq r0, [r2, r12, lsl #2] ");	// if queue originally empty, iQueue[p]=this
       
   723 	asm("streq r0, [r0, #0] ");				// this->next=this
       
   724 	asm("ldr r2, [r2, #%a0]" : : "i" (_FOFF(TDfcQue,iThread)-_FOFF(TDfcQue,iQueue)));	// r2=iDfcQ->iThread
       
   725 	asm("stmneia r0, {r1,r3} ");			// this->next=first, this->prev=last
       
   726 	asm("streq r0, [r0, #4] ");				// this->prev=this
       
    727 	asm("ldrb r12, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iNState));	// r12=thread NState
       
   728 	asm("strne r0, [r1, #4] ");				// first->prev=this
       
   729 	asm("strne r0, [r3, #0] ");				// last->next=this
       
   730 	asm("cmp r12, #%a0" : : "i" ((TInt)NThreadBase::EWaitDfc));		// check for EWaitDfc
       
   731 	asm("mov r0, r2 ");						// r0->thread
       
   732 	asm("beq check_suspend_then_ready ");	// if it is, release thread
       
   733 	__JUMP(,lr);							// else we are finished - NOTE R0=thread ptr != 0
       
   734 
       
   735 	asm("queue_dfcs_0: ");
       
   736 #ifdef BTRACE_CPU_USAGE
       
   737 	asm("ldrb r1, [r5, #%a0]" : : "i" (_FOFF(TScheduler,iCpuUsageFilter)-_FOFF(TScheduler,iDfcs)));
       
   738 	asm("strb r2, [r5, #%a0]" : : "i" (_FOFF(TScheduler,iDfcPendingFlag)-_FOFF(TScheduler,iDfcs)));
       
   739 	asm("strb r2, [r5, #%a0]" : : "i" (_FOFF(TScheduler,iInIDFC)-_FOFF(TScheduler,iDfcs)));
       
   740 	asm("cmp r1, #0");
       
   741 	asm("blne idfc_end_trace");
       
   742 #else
       
   743 	asm("strb r2, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iDfcPendingFlag)-_FOFF(TScheduler,iDfcs)));
       
   744 	asm("strb r2, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iInIDFC)-_FOFF(TScheduler,iDfcs)));
       
   745 #endif
       
   746 	asm("sub r0, r5, #%a0" : : "i" _FOFF(TScheduler,iDfcs));	// restore r0
       
   747 	asm("mov sp, r11 ");					// retrieve stack pointer before alignment
       
   748 	asm("ldmfd sp!, {r2,r5,r11,pc} ");
       
   749 
       
   750 	asm("do_immediate_dfc: ");
       
   751 	ASM_KILL_LINK(r0,r1);
       
   752 	asm("mov r1, #0x000000ff ");			// pri=0xff (IDFC), spare1=0 (unused), spare2=0 (iOnFinalQ), spare3=0 (iQueued)
       
   753 	asm("str r1, [r0, #%a0]!" : : "i" _FOFF(TDfc,iPriority));	// dfc->iQueued=FALSE, r0->iPriority
       
   754 	asm("ldmib r0, {r0,r1} ");				// r0 = DFC parameter, r1 = DFC function pointer
       
   755 	asm("bic sp, sp, #4 ");					// align stack
       
   756 	__JUMP(,r1);							// call DFC, return to queue_dfcs_1
       
   757 
       
   758 #ifdef BTRACE_CPU_USAGE
       
   759 	asm("idfc_start_trace_header:");
       
   760 	asm(".word %a0" : : "i" ((TInt)(4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIDFCStart<<BTrace::ESubCategoryIndex*8)) );
       
   761 	asm("idfc_end_trace_header:");
       
   762 	asm(".word %a0" : : "i" ((TInt)(4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIDFCEnd<<BTrace::ESubCategoryIndex*8)) );
       
   763 
       
   764 	asm("idfc_start_trace:");
       
   765 	asm("ldr r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
       
   766 	asm("ldr r0, idfc_start_trace_header" );
       
   767 	__JUMP(,r1);
       
   768 
       
   769 	asm("idfc_end_trace:");
       
   770 	asm("ldr r0, idfc_end_trace_header" );
       
   771 	asm("ldr pc, [r5,#%a0]" : : "i" (_FOFF(TScheduler,iBTraceHandler)-_FOFF(TScheduler,iDfcs)));
       
   772 #endif
       
   773 
       
   774 	}
       
   775 #endif
       
   776 
       
   777 #ifdef __DFC_MACHINE_CODED__
       
   778 
       
   779 /** Queues an IDFC or a DFC from an ISR.
       
   780 
       
   781 	This function is the only way to queue an IDFC and is the only way to queue
       
   782 	a DFC from an ISR. To queue a DFC from an IDFC or a thread either Enque()
       
   783 	or DoEnque() should be used.
       
   784 
       
   785 	This function does nothing if the IDFC/DFC is already queued.
       
   786 
       
   787 	@pre Call only from ISR, IDFC or thread with the kernel locked.
       
   788 	@pre Do not call from thread with the kernel unlocked.
       
   789 	@return	TRUE if DFC was actually queued by this call
       
   790 			FALSE if DFC was already queued on entry so this call did nothing
       
   791 	
       
   792 	@see TDfc::DoEnque()
       
   793 	@see TDfc::Enque()
       
   794  */
       
   795 __NAKED__ EXPORT_C TBool TDfc::Add()
       
   796 	{
       
   797 	ASM_CHECK_PRECONDITIONS(MASK_NO_RESCHED);
       
   798 #ifdef _DEBUG
       
   799 	asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(TDfc,iPriority));
       
   800 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ));
       
   801 	asm("cmp r2, #%a0" : : "i" ((TInt)KNumDfcPriorities));
       
   802 	asm("bhs 1f ");
       
   803 	asm("cmp r1, #0 ");
       
   804 	asm("bne 1f ");
       
   805 	ASM_CHECK_PRECONDITIONS(MASK_ALWAYS_FAIL);
       
   806 	asm("1: ");
       
   807 #endif
       
   808 	// Fall through to TDfc::RawAdd() ...
       
   809 	}
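// Illustrative sketch (hypothetical device driver): queuing a DFC from an ISR,
// which is precisely the case Add() exists for. The TDfc is assumed to have been
// constructed elsewhere against the driver's DFC queue.
//
//	void DMyDevice::Isr(TAny* aPtr)
//		{
//		DMyDevice* d = (DMyDevice*)aPtr;
//		d->MaskDeviceInterrupt();		// hypothetical helper
//		d->iRxDfc.Add();				// defer the rest to the DFC thread
//		}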
       
   810 
       
   811 /** Queue an IDFC or a DFC.
       
   812 
       
   813 	This function is identical to TDfc::Add() but no checks are performed for correct usage,
       
   814 	and it contains no instrumentation code.
       
   815 
       
   816 	@return	TRUE if DFC was actually queued by this call
       
   817 			FALSE if DFC was already queued on entry so this call did nothing
       
   818 	@see TDfc::DoEnque()
       
   819 	@see TDfc::Enque()
       
   820 	@see TDfc::Add()
       
   821 */
       
   822 __NAKED__ EXPORT_C TBool TDfc::RawAdd()
       
   823 	{
       
   824 
       
   825 #if defined(__CPU_ARM_HAS_LDREX_STREX_V6K)
       
   826 /* Optimize with LDREXB/STREXB */
       
   827 
       
   828 	asm("add r2, r0, #%a0" : : "i" _FOFF(TDfc, iQueued));	// r2=&iQueued's byte offset 
       
   829 	asm("mov r12, #1 ");									// r12=TRUE
       
   830 
       
   831 	asm("tryagain:	");
       
   832 	LDREXB(3,2);								// r3 = already iQueued
       
   833 	STREXB(1,12,2); 							// Try setting iQueued = TRUE 
       
   834 	asm("teq   r1, #0 ");						// Exclusive write succeeded?
       
   835 	asm("bne   tryagain ");						// No - retry until it does 
       
   836 
       
   837 #elif defined(__CPU_ARM_HAS_LDREX_STREX)
       
   838 /* Implement with LDREX/STREX and shifts */
       
   839 
       
   840 #define IQUEUED_WORD (_FOFF(TDfc, iQueued) & ~3)				// offset of word containing iQueued
       
   841 #define IQUEUED_SHIFT ((_FOFF(TDfc, iQueued) & 3) * 8)			// bit position of byte within word
       
   842 
       
   843 	asm("add r2, r0, #%a0" : : "i" IQUEUED_WORD);				// r2=&iQueued's word
       
   844 
       
   845 	asm("tryagain:	");
       
   846 	LDREX(3, 2);
       
   847 	asm("bic   r12, r3, #%a0" : : "i" ((TInt)0xff<<IQUEUED_SHIFT));	// clear the bits to write to
       
   848 	asm("orr   r12, r12, #%a0" : : "i" ((TInt)0x01<<IQUEUED_SHIFT));	// &iQueued = TRUE;
       
   849 	STREX(1, 12, 2);
       
   850 	asm("teq   r1, #0 ");
       
   851 	asm("bne   tryagain ");
       
   852 	asm("and r3, r3, #%a0" : : "i" ((TInt)0xff<<IQUEUED_SHIFT));		// mask out unwanted bits
       
   853 #else
       
   854 	asm("mov r12, #1 ");										// r12=TRUE
       
   855 	asm("add r2, r0, #%a0" : : "i" _FOFF(TDfc, iQueued));		// r2=&iQueued
       
   856 	asm("swpb r3, r12, [r2] ");									// ATOMIC {r3=iQueued; iQueued=TRUE}
       
   857 #endif
       
   858 
       
   859 	asm("ldr r1, __PendingDfcQueue ");		// r1 points to DFC pending queue
       
   860 
       
   861 	asm("cmp r3, #0 ");						// check if already queued
       
   862 	asm("addeq r3, r1, #4 ");				// if not r3=&TheScheduler.iDfcs.iPrev ...
       
   863 	asm("streq r1, [r0, #0] ");				// ...iNext=&TheScheduler.iDfcs ...
       
   864 
       
   865 #ifdef __CPU_ARM_HAS_LDREX_STREX
       
   866 	asm("movne r0, #0 ");
       
   867 	asm("bne dontswap ");									// easier this way
       
   868 	asm("try2:	");
       
   869 	LDREX(2, 3);							// read
       
   870 	STREX(12, 0, 3);						// write
       
   871 	asm("teq   r12, #0 ");					// success? also restore eq
       
   872 	asm("bne   try2 ");						// no!
       
   873 	asm("mov   r12, #1");
       
   874 #else
       
   875 	asm("swpeq r2, r0, [r3] ");				// ...ATOMIC {r2=last; last=this} ...
       
   876 #endif
       
   877 
       
   878 	asm("streqb r12, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iDfcPendingFlag)-_FOFF(TScheduler,iDfcs)));
       
   879 	asm("streq r0, [r2, #0] ");				// ...old last->iNext=this ...
       
   880 	asm("streq r2, [r0, #4]	");				// ...iPrev=old last
       
   881 
       
   882 	// NOTE: R0=this != 0
       
   883 
       
   884 	asm("dontswap: ");
       
   885 	__JUMP(,lr);
       
   886 
       
   887 	asm("__PendingDfcQueue: ");
       
   888 	asm(".word %a0" : : "i" ((TInt)&TheScheduler.iDfcs));
       
   889 	}
       
   890 
       
   891 
       
   892 /** Queues a DFC (not an IDFC) from an IDFC or thread with preemption disabled.
       
   893 
       
   894 	This function is the preferred way to queue a DFC from an IDFC. It should not
       
   895 	be used to queue an IDFC - use TDfc::Add() for this.
       
   896 
       
   897 	This function does nothing if the DFC is already queued.
       
   898 
       
   899 	@pre Call only from IDFC or thread with the kernel locked.
       
   900 	@pre Do not call from ISR or thread with the kernel unlocked.
       
   901 	@return	TRUE if DFC was actually queued by this call
       
   902 			FALSE if DFC was already queued on entry so this call did nothing
       
   903 
       
   904 	@see TDfc::Add()
       
   905 	@see TDfc::Enque()
       
   906  */
       
   907 __NAKED__ EXPORT_C TBool TDfc::DoEnque()
       
   908 	{
       
   909 	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_NO_RESCHED);
       
   910 #ifdef _DEBUG
       
   911 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ));
       
   912 	asm("cmp r1, #0 ");
       
   913 	asm("bne 1f ");
       
   914 	ASM_CHECK_PRECONDITIONS(MASK_ALWAYS_FAIL);
       
   915 	asm("1: ");
       
   916 #endif
       
   917 
       
   918 #if defined(__CPU_ARM_HAS_LDREX_STREX_V6K)
       
   919 	asm("add r2, r0, #%a0" : : "i" _FOFF(TDfc, iQueued));	// r2=&iQueued's byte offset 
       
   920 	asm("mov r3, #1 ");
       
   921 
       
   922 	asm("tryagain8:	");
       
   923 				LDREXB(1, 2); 				// r1 = iQueued	
       
   924 				STREXB(12, 3, 2); 			// Try setting iQueued = True	
       
   925 	asm("		teq   r12, #1 ");			// worked?
       
   926 	asm("		beq   tryagain8 ");			// nope
       
   927 											// r3 = 1, r1 = old iQueued
       
   928 #elif defined(__CPU_ARM_HAS_LDREX_STREX)
       
   929 	asm("		add   r0, r0, #8 ");		// align address (struct always aligned)
       
   930 	asm("tryagain8:	");
       
   931 				LDREX(2, 0);						// do the load/store half
       
   932 	asm("		bic   r12, r2, #0xff000000 ");		// knock out unwanted bits
       
   933 	asm("		orr   r12, r12, #0x01000000 ");		// 'looking' value
       
   934 				STREX(1, 12, 0);				// write looking value
       
   935 	asm("		teq   r1, #1 ");				// worked?
       
   936 	asm("		beq   tryagain8 ");				// nope
       
   937 	asm("		mov   r1, r2, lsr #24 ");		// extract previous value byte
       
   938 	asm("		sub   r0, r0, #8 ");			// restore base pointer
       
   939 	asm("		mov   r3, #1 ");				// dfc_enque_1 expects r3 = 1
       
   940 #else
       
   941 	asm("add r12, r0, #11 ");				// r12=&iQueued
       
   942 	asm("mov r3, #1 ");
       
   943 	asm("swpb r1, r3, [r12] ");				// ATOMIC {r1=iQueued; iQueued=TRUE}
       
   944 #endif
       
   945 
       
   946 	asm("ldrb r12, [r0, #8] ");				// r12=iPriority
       
   947 	asm("ldr r2, [r0, #20] ");				// r2=iDfcQ
       
   948 	asm("cmp r1, #0 ");						// check if queued
       
   949 	asm("beq dfc_enque_1 ");				// if not, queue it and return with R0 nonzero
       
   950 	asm("mov r0, #0 ");
       
   951 	__JUMP(,lr);
       
   952 	}
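// Illustrative sketch (hypothetical IDFC body): queuing follow-on work onto a DFC
// queue from inside an IDFC. The kernel is already locked there, so DoEnque() can
// be called directly rather than going through TDfc::Enque().
//
//	void DMyDevice::IDfcFn(TAny* aPtr)		// runs as an IDFC, kernel locked
//		{
//		DMyDevice* d = (DMyDevice*)aPtr;
//		d->iWorkDfc.DoEnque();				// returns FALSE if it was already queued
//		}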
       
   953 #endif 
       
   954 
       
   955 #ifdef __FAST_MUTEX_MACHINE_CODED__
       
   956 
       
   957 __ASSERT_COMPILE(_FOFF(NFastMutex,iHoldingThread) == 0);
       
   958 
       
   959 /** Releases a previously acquired fast mutex.
       
   960 	
       
   961 	Generally threads would use NKern::FMSignal() which manipulates the kernel lock
       
   962 	for you.
       
   963 	
       
   964 	@pre The calling thread must hold the mutex.
       
   965 	@pre Kernel must be locked.
       
   966 
       
   967 	@post Kernel is locked.
       
   968 	
       
   969 	@see NFastMutex::Wait()
       
   970 	@see NKern::FMSignal()
       
   971 */
       
   972 EXPORT_C __NAKED__ void NFastMutex::Signal()
       
   973 	{
       
   974 	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
       
   975 	ASM_DEBUG1(FMSignal,r0);
       
   976 	asm("ldr r2, __TheScheduler ");
       
   977 #ifdef BTRACE_FAST_MUTEX
       
   978 	asm("ldrb r1, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
       
   979 	asm("cmp r1, #0");
       
   980 	asm("bne fastmutex_signal_trace");
       
   981 	asm("no_fastmutex_signal_trace:");
       
   982 #endif
       
   983 	asm("mov r12, #0 ");
       
   984 	asm("str r12, [r0], #%a0" : : "i" _FOFF(NFastMutex,iWaiting));		// iHoldingThread=NULL, r0->iWaiting
       
   985 	asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r1=iCurrentThread
       
   986 	asm("ldr r3, [r0] ");				// r3=iWaiting
       
   987 	asm("str r12, [r0] ");				// iWaiting=FALSE
       
   988 	asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// current thread->iHeldFastMutex=NULL
       
   989 	asm("cmp r3, #0 ");					// check waiting flag
       
   990 	asm("bne 2f ");
       
   991 	asm("1: ");
       
   992 	__JUMP(,lr);						// if clear, finished
       
   993 	asm("2: ");
       
   994 	asm("ldr r12, [r1, #%a0]" : : "i" _FOFF(NThread,iCsFunction));
       
   995 	asm("strb r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));	// Assumes iWaiting!=0 mod 256
       
   996 	asm("cmp r12, #0 ");				// check for outstanding CS function
       
   997 	asm("beq 1b ");						// if none, finished
       
   998 	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iCsCount));	// else check CS count
       
   999 	asm("mov r0, r1 ");
       
  1000 	asm("cmp r2, #0 ");
       
  1001 	__JUMP(ne,lr);						// if nonzero, finished
       
  1002 	asm("DoDoCsFunction: ");
       
  1003 	asm("stmfd sp!, {r11,lr} ");
       
  1004 	asm("mov r11, sp ");
       
  1005 	asm("bic sp, sp, #4 ");
       
  1006 	asm("bl  " CSM_ZN11NThreadBase12DoCsFunctionEv);	// if iCsCount=0, DoCsFunction()
       
  1007 	asm("mov sp, r11 ");
       
  1008 	asm("ldmfd sp!, {r11,pc} ");
       
  1009 
       
  1010 #ifdef BTRACE_FAST_MUTEX
       
  1011 	asm("fastmutex_signal_trace:");
       
  1012 	ALIGN_STACK_START;
       
  1013 	asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace
       
  1014 	asm("bl fmsignal_lock_trace_unlock");
       
  1015 	asm("ldmia sp!, {r0-r2,lr}");
       
  1016 	ALIGN_STACK_END;
       
  1017 	asm("b no_fastmutex_signal_trace");
       
  1018 #endif
       
  1019 	}
       
  1020 
       
  1021 
       
  1022 /** Acquires the fast mutex.
       
  1023 
       
  1024     This will block until the mutex is available, and causes
       
  1025 	the thread to enter an implicit critical section until the mutex is released.
       
  1026 
       
  1027 	Generally threads would use NKern::FMWait() which manipulates the kernel lock
       
  1028 	for you.
       
  1029 	
       
  1030 	@pre Kernel must be locked, with lock count 1.
       
  1031 	
       
  1032 	@post Kernel is locked, with lock count 1.
       
  1033 	@post The calling thread holds the mutex.
       
  1034 	
       
  1035 	@see NFastMutex::Signal()
       
  1036 	@see NKern::FMWait()
       
  1037 */
       
  1038 EXPORT_C __NAKED__ void NFastMutex::Wait()
       
  1039 	{
       
  1040 	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
       
  1041 	ASM_DEBUG1(FMWait,r0);
       
  1042 	asm("ldr r2, __TheScheduler ");
       
  1043 	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));	// r3=iHoldingThread
       
  1044 	asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r1=iCurrentThread
       
  1045 	asm("cmp r3, #0 ");					// check if mutex held
       
  1046 	asm("bne fastmutex_wait_block ");
       
  1047 	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));	// if not, iHoldingThread=current thread
       
  1048 	asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// and current thread->iHeldFastMutex=this
       
  1049 #ifdef BTRACE_FAST_MUTEX
       
  1050 	asm("ldrb r12, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
       
  1051 	asm("cmp r12, #0");
       
  1052 	asm("bne fmwait_trace2");
       
  1053 #endif
       
  1054 	__JUMP(,lr);						// and we're done
       
  1055 	asm("fastmutex_wait_block:"); 
       
  1056 	asm("str lr, [sp, #-4]! ");			// We must wait - save return address
       
  1057 	asm("mov r12, #1 ");
       
  1058 	asm("str r12, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting));		// iWaiting=TRUE
       
  1059 	asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// current thread->iWaitFastMutex=this
       
  1060 	asm("mov r0, r3 ");					// parameter for YieldTo
       
  1061 	ASM_DEBUG1(FMWaitYield,r0);
       
  1062 	asm("bl  " CSM_ZN10TScheduler7YieldToEP11NThreadBase);	// yield to the mutex holding thread
       
  1063 	// will not return until the mutex is free
       
  1064 	// on return r0=Scheduler,r1=0,r2!=0,r3=current thread, kernel unlocked, interrupts disabled
       
  1065 	asm("mov r12, #1 ");
       
  1066 	asm("str r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
       
  1067 	SET_INTS(r12, MODE_SVC, INTS_ALL_ON);	// reenable interrupts
       
  1068 	asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// r2=this
       
  1069 	asm("str r1, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// iWaitFastMutex=NULL
       
  1070 	asm("str r3, [r2, #0] ");			// iHoldingThread=current thread
       
  1071 	asm("str r2, [r3, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// current thread->iHeldFastMutex=this
       
  1072 #ifdef BTRACE_FAST_MUTEX
       
  1073 	asm("ldrb r12, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
       
  1074 	asm("cmp r12, #0");
       
  1075 	asm("bne fastmutex_wait_trace2"); 
       
  1076 #endif
       
  1077 	asm("ldr pc, [sp], #4 ");
       
  1078 	
       
  1079 #ifdef BTRACE_FAST_MUTEX
       
  1080 	asm("fastmutex_wait_trace2:");
       
  1081 	// r0=scheduler r2=mutex r3=thread 
       
  1082 	asm("ldr lr, [sp], #4 ");
       
  1083 	ALIGN_STACK_START;
       
  1084 	asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace
       
  1085 	asm("bl fmwait_lockacquiredwait_trace");
       
  1086 	asm("ldmia sp!, {r0-r2,lr}");
       
  1087 	ALIGN_STACK_END;
       
  1088 	__JUMP(,lr);
       
  1089 #endif
       
  1090 	}
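        // Illustrative usage sketch (not part of the original source): direct use of
        // NFastMutex::Wait()/Signal() under the kernel lock, following the preconditions
        // documented above ('KMyLock' is a hypothetical NFastMutex instance, and it is
        // assumed that Signal() is likewise called with the kernel locked):
        //
        //     NKern::Lock();              // kernel locked, count 1
        //     KMyLock.Wait();             // blocks until the mutex is free
        //     // ... code protected by KMyLock ...
        //     KMyLock.Signal();           // release the mutex
        //     NKern::Unlock();            // may reschedule here
        //
        // Most code uses NKern::FMWait()/NKern::FMSignal() instead, which manipulate
        // the kernel lock for you.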
       
  1091 
       
  1092 
       
  1093 /** Releases the System Lock.
       
  1094 
       
  1095 	@pre System lock must be held.
       
  1096 
       
  1097 	@see NKern::LockSystem()	
       
  1098 	@see NKern::FMSignal()
       
  1099 */
       
  1100 EXPORT_C __NAKED__ void NKern::UnlockSystem()
       
  1101 	{
       
  1102 	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
       
  1103 	ASM_CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED);
       
  1104 	asm("ldr r0, __SystemLock ");
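        	// Note: no return here - this __NAKED__ function falls straight through into
        	// NKern::FMSignal() below with r0 = &TheScheduler.iLock, so releasing the
        	// system lock is simply a fast mutex signal on iLock.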
       
  1105 	}
       
  1106 
       
  1107 
       
  1108 /** Releases a previously acquired fast mutex.
       
  1109 	
       
  1110 	@param aMutex The fast mutex to be released.
       
  1111 	
       
  1112 	@pre The calling thread must hold the mutex.
       
  1113 	
       
  1114 	@see NFastMutex::Signal()
       
  1115 	@see NKern::FMWait()
       
  1116 */
       
  1117 EXPORT_C __NAKED__ void NKern::FMSignal(NFastMutex*)
       
  1118 	{
       
  1119 	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
       
  1120 	ASM_DEBUG1(NKFMSignal,r0);	
       
  1121 
       
  1122 	asm("ldr r2, __TheScheduler ");
       
  1123 #ifdef BTRACE_FAST_MUTEX
       
  1124 	asm("ldrb r1, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
       
  1125 	asm("cmp r1, #0");
       
  1126 	asm("bne fmsignal_trace1");
       
  1127 	asm("no_fmsignal_trace1:");
       
  1128 #endif
       
  1129 
       
  1130 #ifdef __CPU_ARM_HAS_CPS
       
  1131 	asm("mov r12, #0 ");
       
  1132 	CPSIDIF;							// disable interrupts
       
  1133 	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting));		// r3=iWaiting
       
  1134 	asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r1=iCurrentThread
       
  1135 	asm("str r12, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));	// iHoldingThread=NULL
       
  1136 	asm("cmp r3, #0 ");					// check waiting flag
       
  1137 	asm("str r12, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting));		// iWaiting=FALSE
       
  1138 	asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// current thread->iHeldFastMutex=NULL
       
  1139 	asm("bne 1f ");
       
  1140 	CPSIEIF;							// reenable interrupts
       
  1141 	__JUMP(,lr);						// if clear, finished
       
  1142 	asm("1: ");
       
  1143 	asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel if set (assumes iWaiting always 0 or 1)
       
  1144 	CPSIEIF;							// reenable interrupts
       
  1145 #else
       
  1146 	SET_INTS_1(r3, MODE_SVC, INTS_ALL_OFF);
       
  1147 	asm("mov r12, #0 ");
       
  1148 	asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r1=iCurrentThread
       
  1149 	SET_INTS_2(r3, MODE_SVC, INTS_ALL_OFF);	// disable interrupts
       
  1150 	asm("str r12, [r0], #%a0" : : "i" _FOFF(NFastMutex,iWaiting));		// iHoldingThread=NULL, r0->iWaiting
       
  1151 	asm("ldr r3, [r0] ");				// r3=iWaiting
       
  1152 	asm("str r12, [r0] ");				// iWaiting=FALSE
       
  1153 	asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// current thread->iHeldFastMutex=NULL
       
  1154 	asm("mov r12, #0x13 ");
       
  1155 	asm("cmp r3, #0 ");					// check waiting flag
       
  1156 	__MSR_CPSR_C(eq, r12);			// if clear, finished
       
  1157 	__JUMP(eq,lr);
       
  1158 	asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel (assumes iWaiting always 0 or 1)
       
  1159 	asm("msr cpsr_c, r12 ");				// reenable interrupts
       
  1160 #endif	
       
  1161 	asm("strb r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
       
  1162 	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(NThread,iCsFunction));		// r3=current thread->iCsFunction
       
  1163 	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iCsCount));			// r2=current thread->iCsCount
       
  1164 	asm("str lr, [sp, #-4]! ");
       
  1165 	asm("cmp r3, #0 ");					// outstanding CS function?
       
  1166 	asm("beq 2f ");						// branch if not
       
  1167 	asm("cmp r2, #0 ");					// iCsCount!=0 ?
       
  1168 	asm("moveq r0, r1 ");				// if iCsCount=0, DoCsFunction()
       
  1169 	asm("bleq DoDoCsFunction ");
       
  1170 	asm("2: ");
       
  1171 	asm("bl  " CSM_ZN10TScheduler10RescheduleEv);	// reschedule to allow waiting thread in
       
  1172 	SET_INTS(r12, MODE_SVC, INTS_ALL_ON);			// reenable interrupts after reschedule
       
  1173 	asm("ldr pc, [sp], #4 ");
       
  1174 
       
  1175 #ifdef BTRACE_FAST_MUTEX
       
  1176 	asm("fmsignal_trace1:");
       
  1177 	ALIGN_STACK_START;
       
  1178 	asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace
       
  1179 	asm("bl fmsignal_lock_trace_unlock");
       
  1180 	asm("ldmia sp!, {r0-r2,lr}");
       
  1181 	ALIGN_STACK_END;
       
  1182 	asm("b no_fmsignal_trace1");
       
  1183 #endif
       
  1184 	}
       
  1185 
       
  1186 
       
  1187 /** Acquires the System Lock.
       
  1188 
       
   1189     This call blocks until the mutex is available, and causes
       
  1190 	the thread to enter an implicit critical section until the mutex is released.
       
  1191 
       
  1192 	@post System lock is held.
       
  1193 
       
  1194 	@see NKern::UnlockSystem()
       
  1195 	@see NKern::FMWait()
       
  1196 
       
  1197 	@pre	No fast mutex can be held.
       
  1198 	@pre	Kernel must be unlocked.
       
  1199 	@pre	Call in a thread context.
       
  1200 	@pre	Interrupts must be enabled.
       
  1201 */
       
  1202 EXPORT_C __NAKED__ void NKern::LockSystem()
       
  1203 	{
       
  1204 	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NO_FAST_MUTEX|MASK_NOT_ISR|MASK_NOT_IDFC);
       
  1205 	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
       
  1206 	asm("ldr r0, __SystemLock ");
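        	// Note: no return here - this __NAKED__ function falls straight through into
        	// NKern::FMWait() below with r0 = &TheScheduler.iLock, so taking the system
        	// lock is simply a fast mutex wait on iLock.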
       
  1207 	}
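        // Illustrative usage sketch (not part of the original source): the system lock
        // used as a short critical section from thread context, per the preconditions
        // documented above (kernel unlocked, no fast mutex held, interrupts enabled):
        //
        //     NKern::LockSystem();
        //     // ... brief access to data protected by the system lock ...
        //     NKern::UnlockSystem();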
       
  1208 
       
  1209 
       
  1210 /** Acquires a fast mutex.
       
  1211 
       
   1212     This call blocks until the mutex is available, and causes
       
  1213 	the thread to enter an implicit critical section until the mutex is released.
       
  1214 
       
  1215 	@param aMutex The fast mutex to be acquired.
       
  1216 	
       
  1217 	@post The calling thread holds the mutex.
       
  1218 	
       
  1219 	@see NFastMutex::Wait()
       
  1220 	@see NKern::FMSignal()
       
  1221 
       
  1222 	@pre	No fast mutex can be held.
       
  1223 	@pre	Kernel must be unlocked.
       
  1224 	@pre	Call in a thread context.
       
  1225 	@pre	Interrupts must be enabled.
       
  1226 */
       
  1227 EXPORT_C __NAKED__ void NKern::FMWait(NFastMutex*)
       
  1228 	{
       
  1229 	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NO_FAST_MUTEX|MASK_NOT_ISR|MASK_NOT_IDFC);
       
  1230 	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
       
  1231 	ASM_DEBUG1(NKFMWait,r0);
       
  1232 	asm("ldr r2, __TheScheduler ");
       
  1233 
       
  1234 #ifdef __CPU_ARM_HAS_CPS
       
  1235 	CPSIDIF;							// disable interrupts
       
  1236 	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));	// r3=iHoldingThread
       
  1237 	asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r1=iCurrentThread
       
  1238 	asm("cmp r3, #0 ");					// check if mutex held
       
  1239 	asm("bne 1f");
       
  1240 	asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));	// iHoldingThread=current thread
       
  1241 	asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// and current thread->iHeldFastMutex=this
       
  1242 	CPSIEIF;							// reenable interrupts
       
  1243 #ifdef BTRACE_FAST_MUTEX
       
  1244 	asm("ldrb r12, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
       
  1245 	asm("cmp r12, #0");
       
  1246 	asm("bne fmwait_trace2");
       
  1247 #endif	
       
  1248 	__JUMP(,lr);						// we're finished
       
  1249 	asm("1: ");
       
  1250 	asm("mov r3, #1 ");	
       
  1251 	asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// mutex held, so lock the kernel
       
  1252 	CPSIEIF;							// reenable interrupts
       
  1253 #else
       
  1254 	asm("mov r3, #0xd3 ");
       
  1255 	asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r1=iCurrentThread
       
  1256 	asm("msr cpsr, r3 ");				// disable interrupts
       
  1257 	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));	// r3=iHoldingThread
       
  1258 	asm("mov r12, #0x13 ");
       
  1259 	asm("cmp r3, #0");					// check if mutex held
       
  1260 	asm("streq r1, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));	// if not, iHoldingThread=current thread
       
  1261 	asm("streq r0, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// and current thread->iHeldFastMutex=this
       
  1262 	__MSR_CPSR_C(eq, r12);		// and we're finished
       
  1263 #ifdef BTRACE_FAST_MUTEX
       
  1264 	asm("bne no_fmwait_trace2");
       
  1265 	asm("ldrb r12, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
       
  1266 	asm("cmp r12, #0");
       
  1267 	asm("bne fmwait_trace2");
       
  1268 	__JUMP(,lr);
       
  1269 	asm("no_fmwait_trace2:");
       
  1270 #endif	
       
  1271 	__JUMP(eq,lr);
       
  1272 	asm("mov r3, #1 ");
       
  1273 	asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// mutex held, so lock the kernel
       
  1274 	asm("msr cpsr_c, r12 ");				// and reenable interrupts
       
  1275 #endif
       
  1276 	asm("str lr, [sp, #-4]! ");
       
  1277 	asm("str r3, [r0, #4] ");			// iWaiting=TRUE
       
  1278 	asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// current thread->iWaitFastMutex=this
       
  1279 	asm("ldr r0, [r0, #0] ");			// parameter for YieldTo
       
  1280 	ASM_DEBUG1(NKFMWaitYield,r0);
       
  1281 	asm("bl  " CSM_ZN10TScheduler7YieldToEP11NThreadBase);		// yield to the mutex holding thread
       
  1282 	// will not return until the mutex is free
       
  1283 	// on return r0=Scheduler,r1=0,r2!=0,r3=current thread, kernel unlocked, interrupts disabled
       
  1284 	asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// r2=this
       
  1285 	asm("ldr lr, [sp], #4 ");
       
  1286 	asm("str r1, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// iWaitFastMutex=NULL
       
  1287 	asm("str r2, [r3, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// current thread->iHeldFastMutex=this
       
  1288 	asm("str r3, [r2, #0] ");			// iHoldingThread=current thread
       
  1289 	SET_INTS(r12, MODE_SVC, INTS_ALL_ON);
       
  1290 #ifdef BTRACE_FAST_MUTEX
       
  1291 	asm("ldrb r12, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
       
  1292 	asm("cmp r12, #0");
       
  1293 	asm("bne fmwait_trace3"); 
       
  1294 #endif
       
  1295 	__JUMP(,lr);
       
  1296 
       
  1297 #ifdef BTRACE_FAST_MUTEX
       
  1298 	asm("fmwait_trace2:");
       
  1299 	// r0=mutex r1=thread r2=scheduler
       
  1300 	ALIGN_STACK_START;
       
  1301 	asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace
       
  1302 	asm("bl fmwait_lockacquiredwait_trace2");
       
  1303 	asm("ldmia sp!, {r0-r2,lr}");
       
  1304 	ALIGN_STACK_END;
       
  1305 	__JUMP(,lr);
       
  1306 	
       
  1307 	asm("fmwait_trace3:");
       
  1308 	// r0=scheduler r2=mutex r3=thread 
       
  1309 	ALIGN_STACK_START;
       
  1310 	asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace
       
  1311 	asm("bl fmwait_lockacquiredwait_trace");
       
  1312 	asm("ldmia sp!, {r0-r2,lr}");
       
  1313 	ALIGN_STACK_END;
       
  1314 	__JUMP(,lr);
       
  1315 #endif
       
  1316 	}
       
  1317 #endif
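        // Illustrative usage sketch (not part of the original source): the usual
        // NKern-level pairing for the fast mutex code above. 'iLock' is a hypothetical
        // NFastMutex member of some driver object:
        //
        //     void DExample::Update()
        //         {
        //         NKern::FMWait(&iLock);      // blocks if another thread holds iLock
        //         // ... implicit critical section while the mutex is held ...
        //         NKern::FMSignal(&iLock);    // may run a deferred CS function / reschedule
        //         }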
       
  1318 
       
  1319 __NAKED__ void TScheduler::YieldTo(NThreadBase*)
       
  1320 	{
       
  1321 	//
       
  1322 	// Enter in mode_svc with kernel locked, interrupts can be on or off
       
  1323 	// Exit in mode_svc with kernel unlocked, interrupts off
       
  1324 	// On exit r0=&TheScheduler, r1=0, r2!=0, r3=TheCurrentThread, r4-r11 unaltered
       
  1325 	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
       
  1326 	//
       
  1327 	asm("mrs r1, spsr ");					// r1=spsr_svc
       
  1328 	asm("mov r2, r0 ");						// r2=new thread
       
  1329 	asm("ldr r0, __TheScheduler ");			// r0 points to scheduler data
       
  1330 	asm("stmfd sp!, {r1,r4-r11,lr} ");		// store registers and return address
       
  1331 #ifdef __CPU_ARM_USE_DOMAINS
       
  1332 	asm("mrc p15, 0, r12, c3, c0, 0 ");		// r12=DACR
       
  1333 #endif
       
  1334 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r1=iCurrentThread
       
  1335 #ifdef __CPU_HAS_VFP
       
  1336 	VFP_FMRX(,FPEXC_REG,VFP_XREG_FPEXC);	// r10/r11=FPEXC
       
  1337 #endif
       
  1338 #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
       
  1339 	GET_CAR(,r11);							// r11=CAR
       
  1340 #endif
       
  1341 #ifdef __CPU_HAS_CP15_THREAD_ID_REG
       
  1342 	GET_RWRW_TID(,r9); 						// r9=Thread ID
       
  1343 #endif 
       
  1344 #ifdef __CPU_SUPPORT_THUMB2EE
       
  1345 	GET_THUMB2EE_HNDLR_BASE(,r8);			// r8=Thumb-2EE Handler Base
       
  1346 #endif
       
  1347 
       
  1348 	asm("sub sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE));	// make room for original thread, extras, sp_usr and lr_usr
       
  1349 
       
  1350 	// Save the sp_usr and lr_usr and only the required coprocessor registers
       
  1351 	//										Thumb-2EE 	TID		FPEXC		CAR		DACR
       
  1352 	asm("stmia sp, {" 	EXTRA_STACK_LIST(	8,			9, 		FPEXC_REG,	11, 	12) 	"r13-r14}^ ");
       
  1353 #if defined(__CPU_ARMV4) || defined(__CPU_ARMV4T) || defined(__CPU_ARMV5T)
       
  1354 	asm("nop ");	// Can't have banked register access immediately after LDM/STM user registers
       
  1355 #endif
       
  1356 	asm("str sp, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP));	// store original thread's stack pointer
       
  1357 	asm("b switch_threads ");
       
  1358 	}
       
  1359 
       
  1360 #ifdef MONITOR_THREAD_CPU_TIME
       
  1361 
       
  1362 #ifdef HIGH_RES_TIMER_COUNTS_UP
       
  1363 #define CALC_HIGH_RES_DIFF(Rd, Rn, Rm)	asm("sub "#Rd", "#Rn", "#Rm)
       
  1364 #else
       
  1365 #define CALC_HIGH_RES_DIFF(Rd, Rn, Rm)	asm("rsb "#Rd", "#Rn", "#Rm)
       
  1366 #endif
       
  1367 
       
  1368 // Update thread cpu time counters
       
  1369 // Called just before thread switch with r2 == new thread
       
   1370 // Corrupts r3-r8; leaves r5=current time, r6=current thread
       
  1371 #define UPDATE_THREAD_CPU_TIME \
       
  1372 	asm("ldr r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); \
       
  1373 	GET_HIGH_RES_TICK_COUNT(r5); \
       
  1374 	asm("ldr r3, [r6, #%a0]" : : "i" _FOFF(NThreadBase,iLastStartTime)); \
       
  1375 	asm("str r5, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iLastStartTime)); \
       
  1376 	CALC_HIGH_RES_DIFF(r4, r5, r3); \
       
  1377 	asm("add r3, r6, #%a0" : : "i" _FOFF(NThreadBase,iTotalCpuTime)); \
       
  1378 	asm("ldmia r3, {r7-r8}"); \
       
  1379 	asm("adds r7, r7, r4"); \
       
  1380 	asm("adc r8, r8, #0"); \
       
  1381 	asm("stmia r3, {r7-r8}")
       
  1382 
       
  1383 #else
       
  1384 #define UPDATE_THREAD_CPU_TIME
       
  1385 #endif
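        // In C++ terms the UPDATE_THREAD_CPU_TIME macro above does roughly the following
        // (sketch only; HighResTick() stands in for GET_HIGH_RES_TICK_COUNT, and
        // iTotalCpuTime is assumed to be a 64-bit count held as two adjacent 32-bit
        // words, which is what the ldmia/adds/adc sequence implies):
        //
        //     TUint32 now  = HighResTick();
        //     TUint32 used = now - pCurrent->iLastStartTime;    // rsb instead of sub if
        //                                                       // the timer counts down
        //     pNew->iLastStartTime = now;
        //     pCurrent->iTotalCpuTime += used;                  // 64-bit add (adds/adc)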
       
  1386 
       
  1387 // EMI - Schedule Logging
       
  1388 // Needs: r0=TScheduler, r2 = new thread
       
  1389 // If CPU_TIME, needs:  r5=time, r6=current thread
       
   1390 // Preserves r0, r2, r9 (new address space), r10 (&iLock), sp. Trashes r3-r8, lr
       
  1391 
       
  1392 #ifdef __EMI_SUPPORT__
       
  1393 #define EMI_EVENTLOGGER \
       
  1394 	asm("ldr r3, [r0, #%a0]"	: : "i" _FOFF(TScheduler,iLogging)); \
       
  1395 	asm("cmp r3,#0"); \
       
  1396 	asm("blne AddTaskSwitchEvent");
       
  1397 
       
  1398 // Needs: r0=TScheduler, r2 = new thread
       
  1399 #define EMI_CHECKDFCTAG(no) \
       
  1400 	asm("ldr r3, [r0, #%a0]"	: : "i" _FOFF(TScheduler,iEmiMask)); \
       
  1401 	asm("ldr r4, [r2,#%a0]"		: : "i" _FOFF(NThread, iTag)); \
       
  1402 	asm("ands r3, r3, r4"); \
       
  1403 	asm("bne emi_add_dfc" #no); \
       
  1404 	asm("check_dfc_tag_done" #no ": ");
       
  1405 
       
  1406 #define EMI_ADDDFC(no) \
       
  1407 	asm("emi_add_dfc" #no ": "); \
       
  1408 	asm("ldr r4, [r0,#%a0]"		: : "i" _FOFF(TScheduler, iEmiDfcTrigger)); \
       
  1409 	asm("mov r5, r2"); \
       
  1410 	asm("orr r4, r3, r4");  \
       
  1411 	asm("str r4, [r0,#%a0]"		: : "i" _FOFF(TScheduler, iEmiDfcTrigger)); \
       
  1412 	asm("mov r6, r0"); \
       
  1413 	asm("ldr r0, [r0,#%a0]"		: : "i" _FOFF(TScheduler, iEmiDfc)); \
       
  1414 	asm("bl " CSM_ZN4TDfc3AddEv); \
       
  1415 	asm("mov r2, r5"); \
       
  1416 	asm("mov r0, r6"); \
       
  1417 	asm("b check_dfc_tag_done" #no);
       
  1418 
       
  1419 #else
       
  1420 #define EMI_EVENTLOGGER
       
  1421 #define EMI_CHECKDFCTAG(no)
       
  1422 #define EMI_ADDDFC(no)
       
  1423 #endif
       
  1424 
       
  1425 
       
  1426 __ASSERT_COMPILE(_FOFF(NThread,iPriority) == _FOFF(NThread,iPrev) + 4);
       
  1427 __ASSERT_COMPILE(_FOFF(NThread,i_ThrdAttr) == _FOFF(NThread,iPriority) + 2);
       
  1428 __ASSERT_COMPILE(_FOFF(NThread,iHeldFastMutex) == _FOFF(NThread,i_ThrdAttr) + 2);
       
  1429 __ASSERT_COMPILE(_FOFF(NThread,iWaitFastMutex) == _FOFF(NThread,iHeldFastMutex) + 4);
       
  1430 __ASSERT_COMPILE(_FOFF(NThread,iAddressSpace) == _FOFF(NThread,iWaitFastMutex) + 4);
       
  1431 
       
  1432 __NAKED__ void TScheduler::Reschedule()
       
  1433 	{
       
  1434 	//
       
  1435 	// Enter in mode_svc with kernel locked, interrupts can be on or off
       
  1436 	// Exit in mode_svc with kernel unlocked, interrupts off
       
  1437 	// On exit r0=&TheScheduler, r1=0, r3=TheCurrentThread, r4-r11 unaltered
       
  1438 	// r2=0 if no reschedule occurred, non-zero if a reschedule did occur.
       
  1439 	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
       
  1440 	//
       
  1441 	asm("ldr r0, __TheScheduler ");			// r0 points to scheduler data
       
  1442 	asm("str lr, [sp, #-4]! ");				// save return address
       
  1443 	SET_INTS(r3, MODE_SVC, INTS_ALL_OFF);	// interrupts off
       
  1444 	asm("ldrb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iDfcPendingFlag));
       
  1445 	asm("mov r2, #0 ");						// start with r2=0
       
  1446 	asm("cmp r1, #0 ");						// check if DFCs pending
       
  1447 
       
  1448 	asm("start_resched: ");
       
  1449 	asm("blne  " CSM_ZN10TScheduler9QueueDfcsEv);	// queue any pending DFCs - PRESERVES R2
       
  1450 	asm("ldrb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
       
  1451 	SET_INTS_1(r3, MODE_SVC, INTS_ALL_ON);
       
  1452 	asm("cmp r1, #0 ");						// check if a reschedule is required
       
  1453 	asm("beq no_resched_needed ");			// branch out if not
       
  1454 	SET_INTS_2(r3, MODE_SVC, INTS_ALL_ON);	// enable interrupts
       
  1455 	asm("mrs r2, spsr ");					// r2=spsr_svc
       
  1456 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
       
  1457 	asm("stmfd sp!, {r2,r4-r11} ");			// store registers and return address
       
  1458 #ifdef __CPU_HAS_VFP
       
  1459 	VFP_FMRX(,FPEXC_REG,VFP_XREG_FPEXC);	// r10/r11=FPEXC
       
  1460 #endif
       
  1461 #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
       
  1462 	GET_CAR(,r11);							// r11=CAR
       
  1463 #endif
       
  1464 #ifdef __CPU_HAS_CP15_THREAD_ID_REG
       
  1465 	GET_RWRW_TID(,r9);						// r9=Thread ID
       
  1466 #endif 
       
  1467 #ifdef __CPU_ARM_USE_DOMAINS
       
  1468 	asm("mrc p15, 0, r12, c3, c0, 0 ");		// r12=DACR
       
  1469 #endif
       
  1470 #ifdef __CPU_SUPPORT_THUMB2EE
       
  1471 	GET_THUMB2EE_HNDLR_BASE(,r8);			// r8=Thumb-2EE Handler Base
       
  1472 #endif
       
  1473 	asm("ldr lr, [r0, #4] ");				// lr=present mask high
       
  1474 	asm("sub sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE));	// make room for extras, sp_usr and lr_usr
       
  1475 	asm("str sp, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP));	// store original thread's stack pointer
       
  1476 
       
  1477 
       
  1478 	// Save the sp_usr and lr_usr and only the required coprocessor registers
       
  1479 	//										Thumb-2EE	TID		FPEXC		CAR		DACR
       
  1480 	asm("stmia sp, {"	EXTRA_STACK_LIST(	8,			9, 		FPEXC_REG, 	11, 	12)		"r13-r14}^ ");
       
  1481 	// NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
       
  1482 
       
  1483 	asm("ldr r1, [r0], #%a0" : : "i" _FOFF(TScheduler,iQueue));		// r1=present mask low, r0=&iQueue[0]
       
  1484 #ifdef __CPU_ARM_HAS_CLZ
       
  1485 	CLZ(12,14);								// r12=31-MSB(r14)
       
  1486 	asm("subs r12, r12, #32 ");				// r12=-1-MSB(r14), 0 if r14=0
       
  1487 	CLZcc(CC_EQ,12,1);						// if r14=0, r12=31-MSB(r1)
       
  1488 	asm("rsb r12, r12, #31 ");				// r12=highest ready thread priority
       
  1489 #else
       
  1490 	asm("mov r12, #31 ");					// find the highest priority ready thread
       
  1491 	asm("cmp r14, #0 ");					// high word nonzero?
       
  1492 	asm("moveq r14, r1 ");					// if zero, r14=low word
       
  1493 	asm("movne r12, #63 ");					// else start at pri 63
       
  1494 	asm("cmp r14, #0x00010000 ");
       
  1495 	asm("movlo r14, r14, lsl #16 ");
       
  1496 	asm("sublo r12, r12, #16 ");
       
  1497 	asm("cmp r14, #0x01000000 ");
       
  1498 	asm("movlo r14, r14, lsl #8 ");
       
  1499 	asm("sublo r12, r12, #8 ");
       
  1500 	asm("cmp r14, #0x10000000 ");
       
  1501 	asm("movlo r14, r14, lsl #4 ");
       
  1502 	asm("sublo r12, r12, #4 ");
       
  1503 	asm("cmp r14, #0x40000000 ");
       
  1504 	asm("movlo r14, r14, lsl #2 ");
       
  1505 	asm("sublo r12, r12, #2 ");
       
  1506 	asm("cmp r14, #0x80000000 ");
       
  1507 	asm("sublo r12, r12, #1 ");				// r12 now equals highest ready priority
       
  1508 #endif
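        	// In C++ terms the sequence above computes the highest set bit of the 64-bit
        	// present mask (high word in lr, low word in r1), i.e. the highest ready
        	// priority (sketch only; at least one thread - the null thread - is always
        	// ready, so the mask is never zero):
        	//
        	//     TUint32 w = high ? high : low;         // pick the non-zero word
        	//     TInt    p = high ? 63 : 31;            // start from its top bit
        	//     while (!(w & 0x80000000u))             // CLZ, or the shift/subtract
        	//         { w <<= 1; --p; }                  // cascade on CPUs without CLZ
        	//     // p is now the highest ready priority (r12)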
       
  1509 	asm("ldr r2, [r0, r12, lsl #2] ");		// r2=pointer to highest priority thread's link field
       
  1510 	asm("sub r0, r0, #%a0" : : "i" _FOFF(TScheduler,iQueue));
       
  1511 	asm("mov r4, #0 ");
       
  1512 	asm("ldmia r2, {r3,r5-r9,lr} ");		// r3=next r5=prev r6=attributes, r7=heldFM, r8=waitFM, r9=address space
       
  1513 											// lr=time
       
  1514 	asm("add r10, r0, #%a0" : : "i" _FOFF(TScheduler,iLock));
       
  1515 	asm("strb r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));	// clear flag
       
  1516 	ASM_DEBUG1(InitSelection,r2);
       
  1517 	asm("cmp lr, #0 ");						// check if timeslice expired
       
  1518 	asm("bne no_other ");					// skip if not
       
  1519 	asm("cmp r3, r2 ");						// check for thread at same priority
       
  1520 	asm("bne round_robin ");				// branch if there is one
       
  1521 	asm("no_other: ");
       
  1522 	asm("cmp r7, #0 ");						// does this thread hold a fast mutex?
       
  1523 	asm("bne holds_fast_mutex ");			// branch if it does
       
  1524 	asm("cmp r8, #0 ");						// is thread blocked on a fast mutex?
       
  1525 	asm("bne resched_blocked ");			// branch out if it is
       
  1526 
       
  1527 	asm("resched_not_blocked: ");
       
  1528 	asm("tst r6, #%a0" : : "i" ((TInt)KThreadAttImplicitSystemLock<<16));	// implicit system lock required?
       
  1529 #if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
       
  1530 	asm("beq resched_end ");				// no, switch to this thread
       
  1531 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread));	// yes, look at system lock holding thread
       
  1532 	asm("cmp r1, #0 ");						// lock held?
       
  1533 	asm("beq resched_end ");				// no, switch to this thread
       
  1534 	asm("b resched_imp_sys_held ");
       
  1535 #else
       
  1536 	asm("ldrne r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread));	// yes, look at system lock holding thread
       
  1537 	asm("beq resched_end ");				// no, switch to this thread
       
  1538 	asm("cmp r1, #0 ");						// lock held?
       
  1539 	asm("ldreq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));	// no, get current address space ptr
       
  1540 	asm("bne resched_imp_sys_held ");
       
  1541 	asm("tst r6, #%a0" : : "i" ((TInt)KThreadAttAddressSpace<<16));			// does thread require address space switch?
       
  1542 	asm("cmpne r9, r5 ");					// change of address space required?
       
  1543 	asm("beq resched_end ");				// branch if not
       
  1544 
       
  1545 	ASM_DEBUG1(Resched,r2)					// r2->new thread
       
  1546 	UPDATE_THREAD_CPU_TIME;
       
  1547 	EMI_EVENTLOGGER;
       
  1548 	EMI_CHECKDFCTAG(1)
       
  1549 
       
  1550 #ifdef BTRACE_CPU_USAGE
       
  1551 	asm("ldrb r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
       
  1552 	asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP));				// restore new thread's stack pointer
       
  1553 	asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));		// iCurrentThread=r2
       
  1554 	asm("cmp r1, #0");
       
  1555 	asm("blne context_switch_trace");
       
  1556 #else
       
  1557 	asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP));				// restore new thread's stack pointer
       
  1558 	asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));		// iCurrentThread=r2
       
  1559 #endif
       
  1560 
       
  1561 #ifdef __CPU_HAS_ETM_PROCID_REG
       
  1562 	asm("mcr p15, 0, r2, c13, c0, 1 ");		// notify ETM of new thread
       
  1563 #endif
       
  1564 	SET_INTS_1(r12, MODE_SVC, INTS_ALL_OFF);
       
  1565 #if EXTRA_STACK_SPACE==0 && defined(__CPU_ARM9_USER_LDM_BUG)
       
  1566 	asm("mov r1, sp ");
       
  1567 	asm("ldmia r1, {r13,r14}^ ");			// restore sp_usr and lr_usr
       
  1568 	// NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
       
  1569 #else
       
  1570 	// Load the sp_usr and lr_usr and only the required coprocessor registers
       
  1571 	//										Thumb-2EE	TID		FPEXC		CAR		DACR
       
  1572 	asm("ldmia sp, {"	EXTRA_STACK_LIST(	3,			4, 		5,			6, 		11)		"r13-r14}^ ");
       
  1573 	// NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
       
  1574 #endif
       
  1575 	asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread));	// iLock.iHoldingThread=new thread
       
  1576 	asm("str r10, [r2, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));			// current thread->iHeldFastMutex=&iLock
       
  1577 #ifdef BTRACE_FAST_MUTEX
       
  1578 	asm("ldrb lr, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
       
  1579 	asm("cmp lr, #0");
       
  1580 	asm("blne reschedule_syslock_wait_trace");
       
  1581 #endif	
       
  1582 
       
  1583 #ifdef __CPU_SUPPORT_THUMB2EE
       
  1584 	SET_THUMB2EE_HNDLR_BASE(,r3);			
       
  1585 #endif
       
  1586 #ifdef __CPU_HAS_CP15_THREAD_ID_REG
       
  1587 	SET_RWRW_TID(,r4); 
       
  1588 #endif 
       
  1589 #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
       
  1590 	SET_CAR(,r6)
       
  1591 #endif
       
  1592 #ifdef __CPU_ARM_USE_DOMAINS
       
  1593 	asm("mcr p15, 0, r11, c3, c0, 0 ");
       
  1594 #endif
       
  1595 #ifdef __CPU_HAS_VFP
       
  1596 	VFP_FMXR(,VFP_XREG_FPEXC,5);	// restore FPEXC from R5
       
  1597 #endif
       
  1598 	asm("add sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE));	// step past sp_usr and lr_usr
       
  1599 
       
  1600 	// Do process switching
       
  1601 	// Handler called with:
       
  1602 	// r0->scheduler, r2->current thread
       
  1603 	// r9->new address space, r10->system lock
       
  1604 	// Must preserve r0,r2, can modify other registers
       
  1605 	CPWAIT(,r1);
       
  1606 	SET_INTS_2(r12, MODE_SVC, INTS_ALL_OFF);	// disable interrupts
       
  1607 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
       
  1608 	asm("mov r3, r2 ");
       
  1609 	asm("cmp r1, #0 ");
       
  1610 	asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// unlock the kernel
       
  1611 	asm("blne  " CSM_ZN10TScheduler10RescheduleEv);
       
  1612 	SET_INTS(r12, MODE_SVC, INTS_ALL_ON);	// kernel is now unlocked, interrupts enabled, system lock held
       
  1613 	asm("mov r2, r3 ");
       
  1614 	asm("mov lr, pc ");
       
  1615 	asm("ldr pc, [r0, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler));	// do process switch
       
  1616 
       
  1617 	asm("mov r1, #1 ");
       
  1618 	asm("mov r4, #0 ");
       
  1619 	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));			// lock the kernel
       
  1620 	asm("mov r3, r2 ");						// r3->new thread
       
  1621 	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));			// check system lock wait flag
       
  1622 	asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread));	// release system lock
       
  1623 	asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
       
  1624 	asm("str r4, [r3, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
       
  1625 #ifdef BTRACE_FAST_MUTEX
       
  1626 	asm("ldrb lr, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
       
  1627 	asm("cmp lr, #0");
       
  1628 	asm("blne reschedule_syslock_signal_trace");
       
  1629 #endif	
       
  1630 	asm("cmp r2, #0 ");
       
  1631 	asm("beq switch_threads_2 ");			// no contention on system lock
       
  1632 	asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThread,iCsFunction));
       
  1633 	asm("ldr r12, [r3, #%a0]" : : "i" _FOFF(NThread,iCsCount));
       
  1634 	asm("strb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));	// contention - need to reschedule again
       
  1635 	asm("cmp r2, #0 ");						// outstanding CS function?
       
  1636 	asm("beq switch_threads_2 ");			// branch if not
       
  1637 	asm("cmp r12, #0 ");					// iCsCount!=0 ?
       
  1638 	asm("bne switch_threads_2 ");			// branch if it is
       
  1639 	asm("ldr r1, [sp, #0] ");				// r1=spsr_svc for this thread
       
  1640 	asm("mov r4, r0 ");
       
  1641 	asm("mov r5, r3 ");
       
  1642 	asm("msr spsr, r1 ");					// restore spsr_svc
       
  1643 	asm("mov r0, r3 ");						// if iCsCount=0, DoCsFunction()
       
  1644 	asm("bl DoDoCsFunction ");
       
  1645 	asm("mov r0, r4 ");
       
  1646 	asm("mov r3, r5 ");
       
  1647 	asm("b switch_threads_2 ");
       
  1648 #endif	// __MEMMODEL_MULTIPLE__ || __MEMMODEL_FLEXIBLE__
       
  1649 
       
  1650 	asm("round_robin: ");					// get here if thread's timeslice has expired and there is another
       
  1651 											// thread ready at the same priority
       
  1652 	asm("cmp r7, #0 ");						// does this thread hold a fast mutex?
       
  1653 	asm("bne rr_holds_fast_mutex ");
       
  1654 	asm("ldr lr, [r2, #%a0]" : : "i" _FOFF(NThread,iTimeslice));
       
  1655 	asm("add r0, r0, #%a0" : : "i" _FOFF(TScheduler,iQueue));
       
  1656 	asm("str r3, [r0, r12, lsl #2] ");		// first thread at this priority is now the next one
       
  1657 	asm("str lr, [r2, #%a0]" : : "i" _FOFF(NThread,iTime));	// fresh timeslice
       
  1658 	ASM_DEBUG1(RR,r3);
       
  1659 	asm("add r3, r3, #%a0" : : "i" _FOFF(NThread,iPriority));
       
  1660 	asm("ldmia r3, {r6-r9} ");				// r6=attributes, r7=heldFM, r8=waitFM, r9=address space
       
  1661 	asm("sub r2, r3, #%a0" : : "i" _FOFF(NThread,iPriority));	// move to next thread at this priority
       
  1662 	asm("sub r0, r0, #%a0" : : "i" _FOFF(TScheduler,iQueue));
       
  1663 	asm("b no_other ");
       
  1664 
       
  1665 	asm("resched_blocked: ");				// get here if thread is blocked on a fast mutex
       
  1666 	ASM_DEBUG1(BlockedFM,r8)
       
  1667 	asm("ldr r3, [r8, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));	// if so, get holding thread
       
  1668 	asm("cmp r3, #0 ");						// mutex now free?
       
  1669 	asm("beq resched_not_blocked ");
       
  1670 	asm("mov r2, r3 ");						// no, switch to holding thread
       
  1671 	asm("b resched_end ");
       
  1672 
       
  1673 	asm("holds_fast_mutex: ");
       
  1674 #if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
       
  1675 	asm("cmp r7, r10 ");					// does this thread hold system lock?
       
  1676 	asm("tstne r6, #%a0" : : "i" (((TInt)KThreadAttImplicitSystemLock)<<16));	// if not, is implicit system lock required?
       
  1677 	asm("beq resched_end ");				// if neither, switch to this thread
       
  1678 	asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread));	// check if system lock held
       
  1679 	asm("cmp r5, #0 ");
       
  1680 	asm("bne rr_holds_fast_mutex ");		// if implicit system lock contention, set waiting flag on held mutex but still schedule thread
       
  1681 	asm("b resched_end ");					// else switch to thread and finish
       
  1682 #else
       
  1683 	asm("cmp r7, r10 ");					// does this thread hold system lock?
       
  1684 	asm("beq resched_end ");				// if so, switch to it
       
  1685 	asm("tst r6, #%a0" : : "i" (((TInt)KThreadAttImplicitSystemLock)<<16));	// implicit system lock required?
       
  1686 	asm("ldrne r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread));	// if so, check if system lock held
       
  1687 	asm("beq resched_end ");				// if lock not required, switch to thread and finish
       
  1688 	asm("cmp r5, #0 ");
       
  1689 	asm("bne rr_holds_fast_mutex ");		// if implicit system lock contention, set waiting flag on held mutex but still schedule thread
       
  1690 	asm("tst r6, #%a0" : : "i" (((TInt)KThreadAttAddressSpace)<<16));	// address space required?
       
  1691 	asm("ldrne r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));	// if so, get current address space ptr
       
  1692 	asm("beq resched_end ");				// if not, switch to thread and finish
       
  1693 	asm("cmp r5, r9 ");						// do we have correct address space?
       
  1694 	asm("beq resched_end ");				// yes, switch to thread and finish
       
  1695 	asm("b rr_holds_fast_mutex ");			// no, set waiting flag on fast mutex
       
  1696 #endif // __MEMMODEL_MULTIPLE__ || __MEMMODEL_FLEXIBLE__
       
  1697 
       
  1698 	asm("resched_imp_sys_held: ");			// get here if thread requires implicit system lock and lock is held
       
  1699 	ASM_DEBUG1(ImpSysHeld,r1)
       
  1700 	asm("mov r2, r1 ");						// switch to holding thread
       
  1701 	asm("add r7, r0, #%a0" : : "i" _FOFF(TScheduler,iLock));	// set waiting flag on system lock
       
  1702 
       
  1703 	asm("rr_holds_fast_mutex: ");			// get here if round-robin deferred due to fast mutex held
       
  1704 	asm("mov r6, #1 ");
       
  1705 	asm("str r6, [r7, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting));	// if so, set waiting flag
       
  1706 
       
  1707 	asm("resched_end: ");
       
  1708 	ASM_DEBUG1(Resched,r2)
       
  1709 
       
  1710 	asm("switch_threads: ");
       
  1711 	UPDATE_THREAD_CPU_TIME;	
       
  1712 	EMI_EVENTLOGGER;
       
  1713 	EMI_CHECKDFCTAG(2)
       
  1714 
       
  1715 #ifdef BTRACE_CPU_USAGE
       
  1716 	asm("ldrb r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
       
  1717 	asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP));				// restore new thread's stack pointer
       
  1718 	asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));		// iCurrentThread=r2
       
  1719 	asm("cmp r1, #0");
       
  1720 	asm("blne context_switch_trace");
       
  1721 #else
       
  1722 	asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP));				// restore new thread's stack pointer
       
  1723 	asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));		// iCurrentThread=r2
       
  1724 #endif
       
  1725 
       
  1726 #if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
       
  1727 	asm("ldr r6, [r2, #%a0]" : : "i" _FOFF(NThread,iPriority));		// attributes into r6
       
  1728 	asm("ldr r9, [r2, #%a0]" : : "i" _FOFF(NThread,iAddressSpace));	// address space into r9
       
  1729 #else
       
  1730 #ifdef __CPU_HAS_ETM_PROCID_REG
       
  1731 	asm("mcr p15, 0, r2, c13, c0, 1 ");		// notify ETM of new thread
       
  1732 #endif
       
  1733 #endif
       
  1734 #if EXTRA_STACK_SPACE==0 && defined(__CPU_ARM9_USER_LDM_BUG)
       
  1735 	asm("mov r3, sp ");
       
  1736 	asm("ldmia r3, {r13,r14}^ ");			// restore sp_usr and lr_usr
       
  1737 	// NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
       
  1738 #else
       
  1739 	// Load the sp_usr and lr_usr and only the required coprocessor registers
       
  1740 	//										Thumb-2EE	TID		FPEXC		CAR		DACR
       
  1741 	asm("ldmia sp, {"	EXTRA_STACK_LIST(	1,			3,		FPEXC_REG3, 10, 	11)		"r13-r14}^ ");
       
  1742 	// NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
       
  1743 #endif
       
  1744 #ifdef __CPU_SUPPORT_THUMB2EE
       
  1745 	SET_THUMB2EE_HNDLR_BASE(,r1);			
       
  1746 #endif
       
  1747 #ifdef __CPU_HAS_CP15_THREAD_ID_REG
       
  1748 	SET_RWRW_TID(,r3)						// restore Thread ID from r3
       
  1749 #endif 
       
  1750 	asm("mov r3, r2 ");						// r3=TheCurrentThread
       
  1751 #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
       
  1752 	SET_CAR(,r10)
       
  1753 #endif
       
  1754 #ifdef __CPU_ARM_USE_DOMAINS
       
  1755 	asm("mcr p15, 0, r11, c3, c0, 0 ");
       
  1756 #endif
       
  1757 #ifdef __CPU_HAS_VFP
       
  1758 	VFP_FMXR(,VFP_XREG_FPEXC,FPEXC_REG3);	// restore FPEXC from R4 or R10
       
  1759 #endif
       
  1760 	asm("add sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE));	// step past sp_usr and lr_usr
       
  1761 #if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
       
  1762 	// r2=r3=current thread here
       
  1763 	asm("tst r6, #%a0" : : "i" (((TInt)KThreadAttAddressSpace)<<16));		// address space required?
       
  1764 	asm("ldrne r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler));	// if so, get pointer to process handler
       
  1765 	asm("mov r2, r2, lsr #6 ");				// r2=current thread>>6
       
  1766 	asm("beq switch_threads_3 ");			// skip if address space change not required
       
  1767 
       
  1768 	// Do address space switching
       
  1769 	// Handler called with:
       
  1770 	// r0->scheduler, r3->current thread
       
  1771 	// r9->new address space, r5->old address space
       
  1772 	// Return with r2 = (r2<<8) | ASID
       
  1773 	// Must preserve r0,r3, can modify other registers
       
  1774 	asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));	// get current address space ptr
       
  1775 #ifdef __MEMMODEL_FLEXIBLE__
       
  1776 	asm("adr lr, switch_threads_5 ");
       
  1777 #else
       
  1778 	asm("adr lr, switch_threads_4 ");
       
  1779 #endif
       
  1780 	__JUMP(,r1);
       
  1781 
       
  1782 	asm("switch_threads_3: ");
       
  1783 	asm("mrc p15, 0, r4, c13, c0, 1 ");		// r4 = CONTEXTID (threadID:ASID)
       
  1784 	asm("and r4, r4, #0xff ");				// isolate ASID
       
  1785 	asm("orr r2, r4, r2, lsl #8 ");			// r2 = new thread ID : ASID
       
  1786 	__DATA_SYNC_BARRIER_Z__(r12);			// needed before change to ContextID
       
  1787 
       
  1788 	asm("switch_threads_4: ");
       
  1789 #if (defined(__CPU_ARM1136__) || defined(__CPU_ARM1176__)) && !defined(__CPU_ARM1136_ERRATUM_408022_FIXED)
       
  1790 	asm("nop");
       
  1791 #endif
       
  1792 	asm("mcr p15, 0, r2, c13, c0, 1 ");		// set ContextID (ASID + debugging thread ID)
       
  1793 	__INST_SYNC_BARRIER_Z__(r12);
       
  1794 #ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
       
  1795 	asm("mcr p15, 0, r12, c7, c5, 6 ");		// flush BTAC
       
  1796 #endif
       
  1797 
       
  1798 //	asm("switch_threads_3: ");	// TEMPORARY UNTIL CONTEXTID BECOMES READABLE
       
  1799 	asm("switch_threads_5: ");
       
  1800 #if defined(__CPU_ARM1136__) && defined(__CPU_HAS_VFP) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
       
  1801 	VFP_FMRX(,14,VFP_XREG_FPEXC);
       
  1802 	asm("mrc p15, 0, r4, c1, c0, 1 ");
       
  1803 	asm("tst r14, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
       
  1804 	asm("bic r4, r4, #2 ");					// clear DB bit (disable dynamic prediction)
       
   1805 	asm("and r12, r4, #1 ");				// r12 = RS bit (bit 0 of r4, 1 if return stack enabled)
       
  1806 	asm("orreq r4, r4, r12, lsl #1 ");		// if VFP is being disabled set DB = RS
       
  1807 	asm("mcr p15, 0, r4, c1, c0, 1 ");
       
  1808 #endif
       
  1809 #endif
       
  1810 	CPWAIT(,r12);
       
  1811 
       
  1812 	asm("switch_threads_2: ");
       
  1813 	asm("resched_trampoline_hook_address: ");
       
  1814 	asm("ldmia sp!, {r2,r4-r11,lr} ");		// r2=spsr_svc, restore r4-r11 and return address
       
  1815 	asm("resched_trampoline_return: ");
       
  1816 
       
  1817 	SET_INTS(r12, MODE_SVC, INTS_ALL_OFF);					// disable interrupts
       
  1818 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
       
  1819 	asm("msr spsr, r2 ");					// restore spsr_svc
       
  1820 	asm("cmp r1, #0 ");						// check for another reschedule
       
  1821 	asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// if not needed unlock the kernel
       
  1822 #if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
       
  1823 	asm("nop ");							// ARM Cortex-A9 MPCore erratum 571622 workaround
       
  1824 											// Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
       
  1825 #endif
       
  1826 	__JUMP(eq,lr);							// and return in context of new thread, with r2 non zero
       
  1827 	asm("str lr, [sp, #-4]! ");
       
  1828 	asm("b start_resched ");				// if necessary, go back to beginning
       
  1829 
       
  1830 	asm("no_resched_needed: ");
       
  1831 	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// else unlock the kernel
       
  1832 	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r3=iCurrentThread
       
  1833 	asm("ldr pc, [sp], #4 ");				// and exit immediately with r2=0 iff no reschedule occurred
       
  1834 
       
  1835 	asm("__TheScheduler: ");
       
  1836 	asm(".word TheScheduler ");
       
  1837 	asm("__SystemLock: ");
       
  1838 	asm(".word %a0" : : "i" ((TInt)&TheScheduler.iLock));
       
  1839 #ifdef BTRACE_CPU_USAGE
       
  1840 	asm("context_switch_trace_header:");
       
  1841 	asm(".word %a0" : : "i" ((TInt)(8<<BTrace::ESizeIndex) + (BTrace::EContextIdPresent<<BTrace::EFlagsIndex*8) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::ENewThreadContext<<BTrace::ESubCategoryIndex*8)) );
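        	// The header word above packs the BTrace record header into successive byte
        	// lanes - size (8 bytes), flags (EContextIdPresent), category (ECpuUsage),
        	// sub-category (ENewThreadContext) - assuming the standard BTrace layout in
        	// which ESizeIndex is byte 0 and the other index constants are byte offsets.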
       
  1842 
       
  1843 	asm("context_switch_trace:");
       
  1844 	asm("ldr r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
       
  1845 	asm("stmdb sp!, {r0,r2,lr}");
       
  1846 	asm("ldr r0, context_switch_trace_header" );
       
  1847 	asm("mov lr, pc");
       
  1848 	__JUMP(,r1);
       
  1849 	asm("ldmia sp!, {r0,r2,pc}");
       
  1850 #endif
       
  1851 
       
  1852 #ifdef __DEBUGGER_SUPPORT__
       
  1853 	asm("resched_trampoline: ");
       
  1854 	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleHook));
       
  1855 	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
       
  1856 	asm("mov r11, sp ");					// save stack pointer
       
  1857 	asm("bic sp, sp, #4 ");					// align stack to 8 byte boundary
       
  1858 	asm("tst r1, r1");
       
  1859 	asm("movne lr, pc");
       
  1860 	__JUMP(ne,r1);
       
  1861 	asm("ldr r0, __TheScheduler ");			// r0 points to scheduler data
       
  1862 	asm("mov sp, r11 ");					// restore stack pointer
       
  1863 	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r3=iCurrentThread
       
  1864 	asm("resched_trampoline_unhook_data: ");
       
  1865 	asm("ldmia sp!, {r2,r4-r11,lr} ");		// r2=spsr_svc, restore r4-r11 and return address
       
  1866 	asm("b resched_trampoline_return");
       
  1867 #endif
       
  1868 
       
  1869 #ifdef __EMI_SUPPORT__
       
  1870 	// EMI Task Event Logger
       
  1871 	asm("AddTaskSwitchEvent: ");
       
  1872 #ifndef MONITOR_THREAD_CPU_TIME
       
   1873 	// if we don't have it, get CurrentThread
       
  1874 	asm("ldr r6, [r0, #%a0]"	: : "i" _FOFF(TScheduler,iCurrentThread));
       
  1875 #endif
       
  1876 
       
   1877 	// Check whether the new thread is loggable
       
  1878 	asm("ldrb r3, [r2,#%a0]"	: : "i" _FOFF(NThread, i_ThrdAttr));
       
   1879 	asm("ldr r4, [r6,#%a0]"		: : "i" _FOFF(NThread, iPriority));  // Load spares.  b2=state, b3=attributes
       
  1880 
       
  1881 	asm("tst r3, #%a0"			: : "i" ((TInt) KThreadAttLoggable));
       
  1882 	asm("ldreq r7, [r0, #%a0]"	: : "i" _FOFF(TScheduler,iSigma));
       
  1883 	asm("movne r7,r2");
       
  1884 
       
   1885 	// Check whether the old thread is loggable
       
  1886 	asm("tst r4, #%a0"			: : "i" (KThreadAttLoggable << 16));
       
  1887 	asm("ldreq r6, [r0, #%a0]"	: : "i" _FOFF(TScheduler,iSigma));
       
  1888 
       
  1889 	// Abort log entry if duplicate
       
  1890 	asm("cmp r6,r7");
       
  1891 	__JUMP(eq,lr);
       
  1892 
       
  1893 	// create record:	r3=iType/iFlags/iExtra, r4=iUserState
       
  1894 	//					r5=iTime, r6=iPrevious, r7=iNext
       
  1895 	// waiting = (2nd byte of r4)!=NThread::EReady (=0)
       
  1896 #ifndef MONITOR_THREAD_CPU_TIME
       
  1897 	GET_HIGH_RES_TICK_COUNT(r5);
       
  1898 #endif
       
  1899 
       
  1900 	asm("tst r4, #0xff00");
       
  1901 	asm("ldr r8, [r0, #%a0]"	: : "i" _FOFF(TScheduler,iBufferHead));
       
  1902 	asm("ldr r4, [r0, #%a0]"	: : "i" _FOFF(TScheduler,iEmiState));
       
  1903 	asm("moveq r3, #0x200"); // #2 = waiting flag.
       
  1904 	asm("movne r3, #0x0");
       
  1905 
       
   1906 	// Store record, move on to the next
       
  1907 	asm("stmia r8!,{r3-r7}"); 
       
  1908 
       
  1909 	// Check for and apply buffer wrap
       
  1910 	asm("ldr r7,[r0, #%a0]"		: : "i" _FOFF(TScheduler,iBufferEnd));	// r7 = BufferEnd
       
  1911 	asm("ldr r6,[r0, #%a0]"		: : "i" _FOFF(TScheduler,iBufferTail));	// r6 = BufferTail
       
  1912 	asm("cmp r7,r8");
       
  1913 	asm("ldrlo r8,[r0, #%a0]"	: : "i" _FOFF(TScheduler,iBufferStart));
       
  1914 
       
  1915 	// Check for event lost
       
  1916 	asm("cmp r6,r8");
       
  1917 	asm("str r8, [r0, #%a0]"	: : "i" _FOFF(TScheduler,iBufferHead));	// r8 = BufferHead
       
  1918 	__JUMP(ne,lr);
       
  1919 
       
   1920 	// overflow: move the read pointer on - event lost!
       
  1921 	asm("add r6,r6,#%a0"		: : "i" ((TInt) sizeof(TTaskEventRecord)));	// iBufferTail++
       
  1922 	asm("cmp r7,r6");					// iBufferTail > iBufferEnd ?
       
  1923 	asm("ldrlo r6,[r0, #%a0]"	: : "i" _FOFF(TScheduler,iBufferStart));
       
  1924 
       
  1925 	asm("ldrb r5, [r6, #%a0]"	: : "i" _FOFF(TTaskEventRecord,iFlags));
       
  1926 	asm("orr r5, r5, #%a0"	    : : "i" ((TInt) KTskEvtFlag_EventLost));
       
  1927 	asm("strb r5, [r6, #%a0]"	: : "i" _FOFF(TTaskEventRecord,iFlags));
       
  1928 
       
  1929 	asm("str r6, [r0, #%a0]"	: : "i" _FOFF(TScheduler,iBufferTail));
       
  1930 
       
  1931 	__JUMP(,lr);
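        	// In C++ terms the buffer handling above is a ring-buffer write with an
        	// overflow marker (sketch only; names follow the TScheduler fields used here):
        	//
        	//     iBufferHead++;                              // stmia r8! wrote the record
        	//     if (iBufferHead > iBufferEnd)
        	//         iBufferHead = iBufferStart;             // wrap the write pointer
        	//     if (iBufferHead == iBufferTail)             // caught up with the reader:
        	//         {                                       // the oldest event is lost
        	//         if (++iBufferTail > iBufferEnd)
        	//             iBufferTail = iBufferStart;
        	//         iBufferTail->iFlags |= KTskEvtFlag_EventLost;
        	//         }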
       
  1932 
       
  1933 #if !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_FLEXIBLE__)
       
  1934 	EMI_ADDDFC(1)
       
  1935 #endif
       
  1936 	EMI_ADDDFC(2)
       
  1937 #endif
       
  1938 
       
  1939 #ifdef BTRACE_FAST_MUTEX
       
  1940 	asm("reschedule_syslock_wait_trace:");
       
  1941 	// r0=scheduler r2=thread
       
  1942 	asm("stmdb sp!, {r3,r12}");
       
  1943 	ALIGN_STACK_START;
       
  1944 	asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace
       
  1945 	asm("bl syslock_wait_trace");
       
  1946 	asm("ldmia sp!, {r0-r2,lr}");
       
  1947 	ALIGN_STACK_END;
       
  1948 	asm("ldmia sp!, {r3,r12}");
       
  1949 	__JUMP(,lr);
       
  1950 	
       
  1951 	asm("reschedule_syslock_signal_trace:");
       
  1952 	// r0=scheduler r3=thread
       
  1953 	asm("stmdb sp!, {r3,r12}");
       
  1954 	ALIGN_STACK_START;
       
  1955 	asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace
       
  1956 	asm("bl syslock_signal_trace");
       
  1957 	asm("ldmia sp!, {r0-r2,lr}");
       
  1958 	ALIGN_STACK_END;
       
  1959 	asm("ldmia sp!, {r3,r12}");
       
  1960 	__JUMP(,lr);
       
  1961 #endif	
       
  1962 	};
       
  1963 
       
  1964 
       
  1965 /** 
       
   1966  * Returns the range of linear memory which must be modified in order to insert the scheduler hooks.
       
  1967  * 
       
  1968  * @param aStart Set to the lowest memory address which needs to be modified.
       
  1969  * @param aEnd   Set to the highest memory address +1 which needs to be modified.
       
  1970 
       
  1971  @pre	Kernel must be locked.
       
  1972  @pre	Call in a thread context.
       
  1973  @pre	Interrupts must be enabled.
       
  1974  */
       
  1975 EXPORT_C __NAKED__ void NKern::SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd)
       
  1976 	{
       
  1977 	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
       
  1978 #ifdef __DEBUGGER_SUPPORT__
       
  1979 	asm("adr r2,resched_trampoline_hook_address");
       
  1980 	asm("str r2,[r0]");
       
  1981 	asm("adr r2,resched_trampoline_hook_address+4");
       
  1982 	asm("str r2,[r1]");
       
  1983 #else
       
  1984 	asm("mov r2,#0");
       
  1985 	asm("str r2,[r0]");
       
  1986 	asm("str r2,[r1]");
       
  1987 #endif
       
  1988 	__JUMP(,lr);
       
  1989 	};
       
  1990 
       
  1991 
       
  1992 /** 
       
  1993  * Modifies the scheduler code so that it can call the function set by
       
  1994  * NKern::SetRescheduleCallback().
       
  1995  *
       
  1996  * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.
       
  1997 
       
  1998  @pre	Kernel must be locked.
       
  1999  @pre	Call in a thread context.
       
  2000  @pre	Interrupts must be enabled.
       
  2001  */
       
  2002 EXPORT_C __NAKED__ void NKern::InsertSchedulerHooks()
       
  2003 	{
       
  2004 	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
       
  2005 #ifdef __DEBUGGER_SUPPORT__
       
  2006 	asm("adr r0,resched_trampoline_hook_address");
       
  2007 	asm("adr r1,resched_trampoline");
       
  2008 	asm("sub r1, r1, r0");
       
  2009 	asm("sub r1, r1, #8");
       
  2010 	asm("mov r1, r1, asr #2");
       
  2011 	asm("add r1, r1, #0xea000000");  // r1 = a branch instruction from resched_trampoline_hook_address to resched_trampoline
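        	// The arithmetic above assembles an unconditional ARM branch: B <target> is
        	// encoded as 0xEA000000 | (((target - branch_address - 8) >> 2) & 0x00FFFFFF);
        	// the mask is unnecessary here because the trampoline is a short forward
        	// branch. For example, a branch 0x100 bytes forward encodes as
        	// 0xEA000000 + ((0x100 - 8) >> 2) = 0xEA00003E.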
       
  2012 
       
  2013 #if defined(__MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS)
       
  2014 	// These platforms have shadow memory in non-writable page. We cannot use the standard
       
  2015 	// Epoc::CopyToShadowMemory interface as we hold Kernel lock here.
       
  2016 	// Instead, we'll temporarily disable access permission checking in MMU by switching
       
  2017 	// domain#0 into Manager Mode (see Domain Access Control Register).
       
  2018 	asm("mrs r12, CPSR ");				// save cpsr setting and ...
       
  2019 	CPSIDAIF;							// ...disable interrupts
       
  2020 	asm("mrc p15, 0, r2, c3, c0, 0 ");	// read DACR
       
  2021 	asm("orr r3, r2, #3");				// domain #0 is the first two bits. manager mode is 11b
       
  2022 	asm("mcr p15, 0, r3, c3, c0, 0 ");	// write DACR
       
  2023 	asm("str r1,[r0]");
       
  2024 	asm("mcr p15, 0, r2, c3, c0, 0 ");	// write back the original value of DACR
       
  2025 	asm("msr CPSR_cxsf, r12 "); 		// restore cpsr setting (re-enable interrupts)
       
  2026 #else
       
  2027 	asm("str r1,[r0]");
       
  2028 #endif
       
  2029 
       
  2030 #endif
       
  2031 	__JUMP(,lr);
       
  2032 	};
       
  2033 
       
  2034 
       
  2035 /** 
       
  2036  * Reverts the modification of the Scheduler code performed by NKern::InsertSchedulerHooks()
       
  2037  *
       
  2038  * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.
       
  2039 
       
  2040  @pre	Kernel must be locked.
       
  2041  @pre	Call in a thread context.
       
  2042  @pre	Interrupts must be enabled.
       
  2043  */
       
  2044 EXPORT_C __NAKED__ void NKern::RemoveSchedulerHooks()
       
  2045 	{
       
  2046 	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
       
  2047 #ifdef __DEBUGGER_SUPPORT__
       
  2048 	asm("adr r0,resched_trampoline_hook_address");
       
  2049 	asm("ldr r1,resched_trampoline_unhook_data");
       
  2050 
       
  2051 #if defined(__MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS)
       
  2052 	// See comments above in InsertSchedulerHooks
       
  2053 	asm("mrs r12, CPSR ");				// save cpsr setting and ...
       
  2054 	CPSIDAIF;							// ...disable interrupts
       
  2055 	asm("mrc p15, 0, r2, c3, c0, 0 ");	// read DACR
       
  2056 	asm("orr r3, r2, #3");				// domain #0 is the first two bits. manager mode is 11b
       
  2057 	asm("mcr p15, 0, r3, c3, c0, 0 ");	// write DACR
       
  2058 	asm("str r1,[r0]");
       
  2059 	asm("mcr p15, 0, r2, c3, c0, 0 ");	// write back the original value of DACR
       
  2060 	asm("msr CPSR_cxsf, r12 "); 		// restore cpsr setting (re-enable interrupts)
       
  2061 #else
       
  2062 	asm("str r1,[r0]");
       
  2063 #endif
       
  2064 
       
  2065 #endif
       
  2066 	__JUMP(,lr);
       
  2067 	};
       
  2068 
       
  2069 
       
  2070 /** 
       
  2071  * Set the function which is to be called on every thread reschedule.
       
  2072  *
       
  2073  * @param aCallback  Pointer to callback function, or NULL to disable callback.
       
  2074 
       
  2075  @pre	Kernel must be locked.
       
  2076  @pre	Call in a thread context.
       
  2077  @pre	Interrupts must be enabled.
       
  2078  */
       
  2079 EXPORT_C __NAKED__ void NKern::SetRescheduleCallback(TRescheduleCallback /*aCallback*/)
       
  2080 	{
       
  2081 	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
       
  2082 #ifdef __DEBUGGER_SUPPORT__
       
  2083 	asm("ldr r1, __TheScheduler ");
       
  2084 	asm("str r0, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleHook));
       
  2085 #endif
       
  2086 	__JUMP(,lr);
       
  2087 	};
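
// Usage sketch: installing and removing a reschedule callback under the
// documented preconditions (kernel locked, thread context, interrupts enabled).
// Only effective when __DEBUGGER_SUPPORT__ is defined. MyRescheduleHook,
// EnableRescheduleTrace and DisableRescheduleTrace are hypothetical names, and
// the callback signature assumes TRescheduleCallback is void(*)(NThread*) as
// declared in nkern.h.
//
// @code
// void MyRescheduleHook(NThread* aThread)
// 	{
// 	(void)aThread;		// runs on every reschedule once hooked - keep it short
// 	}
//
// void EnableRescheduleTrace()
// 	{
// 	NKern::Lock();										// @pre kernel must be locked
// 	NKern::InsertSchedulerHooks();						// patch the scheduler code
// 	NKern::SetRescheduleCallback(&MyRescheduleHook);
// 	NKern::Unlock();
// 	}
//
// void DisableRescheduleTrace()
// 	{
// 	NKern::Lock();
// 	NKern::SetRescheduleCallback(NULL);					// NULL disables the callback
// 	NKern::RemoveSchedulerHooks();						// restore the original scheduler code
// 	NKern::Unlock();
// 	}
// @endcode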
       
  2088 
       
  2089 
       
  2090 
       
  2091 /** Disables interrupts to specified level.
       
  2092 
       
  2093 	Note that if we are not disabling all interrupts we must lock the kernel
       
  2094 	here, otherwise a high priority interrupt which is still enabled could
       
  2095 	cause a reschedule and the new thread could then reenable interrupts.
       
  2096 
       
  2097 	@param  aLevel Interrupts are disabled up to and including aLevel.  On ARM,
       
  2098 			level 1 stands for IRQ only and level 2 stands for IRQ and FIQ.
       
  2099 	@return CPU-specific value passed to RestoreInterrupts.
       
  2100 
       
  2101 	@pre 1 <= aLevel <= maximum level (CPU-specific)
       
  2102 
       
  2103 	@see NKern::RestoreInterrupts()
       
  2104  */
       
  2105 EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/)
       
  2106 	{
       
  2107 	asm("cmp r0, #1 ");
       
  2108 	asm("bhi  " CSM_ZN5NKern20DisableAllInterruptsEv);	// if level>1, disable all
       
  2109 	asm("ldreq r12, __TheScheduler ");
       
  2110 	asm("mrs r2, cpsr ");				// r2=original CPSR
       
  2111 	asm("bcc 1f ");						// skip if level=0
       
  2112 	asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TScheduler,iKernCSLocked));
       
  2113 	asm("and r0, r2, #0xc0 ");
       
  2114 	INTS_OFF_1(r2, r2, INTS_IRQ_OFF);	// disable level 1 interrupts
       
  2115 	asm("cmp r3, #0 ");					// test if kernel locked
       
  2116 	asm("addeq r3, r3, #1 ");			// if not, lock the kernel
       
  2117 	asm("streq r3, [r12] ");
       
  2118 	asm("orreq r0, r0, #0x80000000 ");	// and set top bit to indicate kernel locked
       
  2119 	INTS_OFF_2(r2, r2, INTS_IRQ_OFF);
       
  2120 	__JUMP(,lr);
       
  2121 	asm("1: ");
       
  2122 	asm("and r0, r2, #0xc0 ");
       
  2123 	__JUMP(,lr);
       
  2124 	}
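
// Usage sketch: a short critical region that masks IRQs only, pairing
// NKern::DisableInterrupts() with NKern::RestoreInterrupts().
// UpdateSharedCounter is a hypothetical helper.
//
// @code
// void UpdateSharedCounter(volatile TInt& aCounter)
// 	{
// 	TInt irq = NKern::DisableInterrupts(1);		// level 1 = IRQ off, FIQ still enabled
// 	++aCounter;									// must not be interrupted by IRQ code
// 	NKern::RestoreInterrupts(irq);				// restores the IRQ mask and, if the call
// 												// above locked the kernel, unlocks it again
// 	}
// @endcode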
       
  2125 
       
  2126 
       
  2127 /** Disables all interrupts (e.g. both IRQ and FIQ on ARM). 
       
  2128 
       
  2129 	@return CPU-specific value passed to NKern::RestoreInterrupts().
       
  2130 
       
  2131 	@see NKern::RestoreInterrupts()
       
  2132  */
       
  2133 EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts()
       
  2134 	{
       
  2135 	asm("mrs r1, cpsr ");
       
  2136 	asm("and r0, r1, #0xc0 ");			// return I and F bits of CPSR
       
  2137 	INTS_OFF(r1, r1, INTS_ALL_OFF);
       
  2138 	__JUMP(,lr);
       
  2139 	}
       
  2140 
       
  2141 
       
  2142 /** Enables all interrupts (e.g. IRQ and FIQ on ARM).
       
  2143 
       
  2144 	This function never unlocks the kernel.  So it must be used
       
  2145 	only to complement NKern::DisableAllInterrupts. Never use it
       
  2146 	to complement NKern::DisableInterrupts.
       
  2147 
       
  2148 	@see NKern::DisableInterrupts()
       
  2149 	@see NKern::DisableAllInterrupts()
       
  2150 
       
  2151 	@internalComponent
       
  2152  */
       
  2153 EXPORT_C __NAKED__ void NKern::EnableAllInterrupts()
       
  2154 	{
       
  2155 #ifndef __CPU_ARM_HAS_CPS
       
  2156 	asm("mrs r0, cpsr ");
       
  2157 	asm("bic r0, r0, #0xc0 ");
       
  2158 	asm("msr cpsr_c, r0 ");
       
  2159 #else
       
  2160 	CPSIEIF;
       
  2161 #endif
       
  2162 	__JUMP(,lr);
       
  2163 	}
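
// Usage sketch: masking both IRQ and FIQ around a very short region.
// NKern::EnableAllInterrupts() is only valid as the complement of
// NKern::DisableAllInterrupts(); a value returned by NKern::DisableInterrupts()
// must instead be passed to NKern::RestoreInterrupts(). WriteHardwareRegister
// is a hypothetical helper.
//
// @code
// void WriteHardwareRegister(volatile TUint32* aReg, TUint32 aValue)
// 	{
// 	NKern::DisableAllInterrupts();		// IRQ and FIQ off
// 	*aReg = aValue;						// keep this region as short as possible
// 	NKern::EnableAllInterrupts();		// never use this to undo DisableInterrupts()
// 	}
// @endcode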
       
  2164 
       
  2165 
       
  2166 /** Restores interrupts to previous level and unlocks the kernel if it was 
       
  2167 	locked when disabling them.
       
  2168 
       
  2169 	@param 	aRestoreData CPU-specific data returned from NKern::DisableInterrupts
       
  2170 			or NKern::DisableAllInterrupts specifying the previous interrupt level.
       
  2171 
       
  2172 	@see NKern::DisableInterrupts()
       
  2173 	@see NKern::DisableAllInterrupts()
       
  2174  */
       
  2175 EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt /*aRestoreData*/)
       
  2176 	{
       
  2177 	asm("tst r0, r0 ");					// test state of top bit of aLevel
       
  2178 	asm("mrs r1, cpsr ");
       
  2179 	asm("and r0, r0, #0xc0 ");
       
  2180 	asm("bic r1, r1, #0xc0 ");
       
  2181 	asm("orr r1, r1, r0 ");				// replace I and F bits with those supplied
       
  2182 	asm("msr cpsr_c, r1 ");				// flags are unchanged (in particular N)
       
  2183 	__JUMP(pl,lr);						// if top bit of aLevel clear, finished
       
  2184 
       
  2185 	// if top bit of aLevel set, fall through to unlock the kernel
       
  2186 	}
       
  2187 
       
  2188 
       
  2189 /**	Unlocks the kernel.
       
  2190 
       
  2191 	Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are
       
  2192 	pending, calls the scheduler to process them.
       
  2193 	Must be called in mode_svc.
       
  2194 
       
  2195     @pre    Call either in a thread or an IDFC context.
       
  2196     @pre    Do not call from an ISR.
       
  2197  */
       
  2198 EXPORT_C __NAKED__ void NKern::Unlock()
       
  2199 	{
       
  2200 	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
       
  2201 
       
  2202 	asm("ldr r1, __TheScheduler ");
       
  2203 	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
       
  2204 	asm("subs r2, r3, #1 ");
       
  2205 	asm("str r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
       
  2206 	asm("ldreq r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));	// if kernel now unlocked, check flags
       
  2207 	asm("bne 1f ");							// if kernel still locked, return
       
  2208 	asm("cmp r2, #0 ");						// check for DFCs or reschedule
       
  2209 	asm("bne 2f");							// branch if needed
       
  2210 	asm("1: ");
       
  2211 	__JUMP(,lr);							
       
  2212 	asm("2: ");
       
  2213 	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// else lock the kernel again
       
  2214 	asm("str lr, [sp, #-4]! ");				// save return address
       
  2215 	asm("bl  " CSM_ZN10TScheduler10RescheduleEv);	// run DFCs and reschedule, return with kernel unlocked, interrupts disabled
       
  2216 	SET_INTS(r0, MODE_SVC, INTS_ALL_ON);	// reenable interrupts
       
  2217 	asm("ldr pc, [sp], #4 ");
       
  2218 	}
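
// Usage sketch: deferring IDFCs and preemption around a small update to a
// shared structure. NKern::LockC() could be used instead of NKern::Lock()
// where the current NThread pointer is also needed. RemoveFromQueue is a
// hypothetical helper.
//
// @code
// void RemoveFromQueue(SDblQueLink* aLink)
// 	{
// 	NKern::Lock();			// iKernCSLocked++, defers IDFCs and reschedules
// 	aLink->Deque();			// manipulate the shared queue without being preempted
// 	NKern::Unlock();		// iKernCSLocked--, runs pending IDFCs/reschedule if now zero
// 	}
// @endcode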
       
  2219 
       
  2220 /**	Locks the kernel.
       
  2221 
       
  2222 	Increments iKernCSLocked, thereby deferring IDFCs and preemption.
       
  2223 	Must be called in mode_svc.
       
  2224 
       
  2225     @pre    Call either in a thread or an IDFC context.
       
  2226     @pre    Do not call from an ISR.
       
  2227  */
       
  2228 EXPORT_C __NAKED__ void NKern::Lock()
       
  2229 	{
       
  2230 	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
       
  2231 
       
  2232 	asm("ldr r12, __TheScheduler ");
       
  2233 	asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TScheduler,iKernCSLocked));
       
  2234 	asm("add r3, r3, #1 ");			// lock the kernel
       
  2235 	asm("str r3, [r12] ");
       
  2236 	__JUMP(,lr);
       
  2237 	}
       
  2238 
       
  2239 
       
  2240 /**	Locks the kernel and returns a pointer to the current thread.
       
  2241 	Increments iKernCSLocked, thereby deferring IDFCs and preemption.
       
  2242 
       
  2243     @pre    Call either in a thread or an IDFC context.
       
  2244     @pre    Do not call from an ISR.
       
  2245  */
       
  2246 EXPORT_C __NAKED__ NThread* NKern::LockC()
       
  2247 	{
       
  2248 	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
       
  2249 
       
  2250 	asm("ldr r12, __TheScheduler ");
       
  2251 	asm("ldr r0, [r12, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
       
  2252 	asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TScheduler,iKernCSLocked));
       
  2253 	asm("add r3, r3, #1 ");			// lock the kernel
       
  2254 	asm("str r3, [r12] ");
       
  2255 	__JUMP(,lr);
       
  2256 	}
       
  2257 
       
  2258 
       
  2259 __ASSERT_COMPILE(_FOFF(TScheduler,iKernCSLocked) == _FOFF(TScheduler,iRescheduleNeededFlag) + 4);
       
  2260 
       
  2261 /**	Allows IDFCs and rescheduling if they are pending.
       
  2262 
       
  2263 	If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1,
       
  2264 	calls the scheduler to process the IDFCs and possibly reschedule.
       
  2265 	Must be called in mode_svc.
       
  2266 
       
  2267 	@return	Nonzero if a reschedule actually occurred, zero if not.
       
  2268 
       
  2269     @pre    Call either in a thread or an IDFC context.
       
  2270     @pre    Do not call from an ISR.
       
  2271  */
       
  2272 EXPORT_C __NAKED__ TInt NKern::PreemptionPoint()
       
  2273 	{
       
  2274 	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
       
  2275 
       
  2276 	asm("ldr r3, __RescheduleNeededFlag ");
       
  2277 	asm("ldmia r3, {r0,r1} ");				// r0=RescheduleNeededFlag, r1=KernCSLocked
       
  2278 	asm("cmp r0, #0 ");
       
  2279 	__JUMP(eq,lr);							// if no reschedule required, return 0
       
  2280 	asm("subs r1, r1, #1 ");
       
  2281 	__JUMP(ne,lr);							// if kernel still locked, exit
       
  2282 	asm("str lr, [sp, #-4]! ");				// store return address
       
  2283 
       
  2284 	// reschedule - this also switches context if necessary
       
  2285 	// enter this function in mode_svc, interrupts on, kernel locked
       
  2286 	// exit this function in mode_svc, all interrupts off, kernel unlocked
       
  2287 	asm("bl  " CSM_ZN10TScheduler10RescheduleEv);
       
  2288 
       
  2289 	asm("mov r1, #1 ");
       
  2290 	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel again
       
  2291 	SET_INTS(r3, MODE_SVC, INTS_ALL_ON);	// interrupts back on
       
  2292 	asm("mov r0, r2 ");						// Return 0 if no reschedule, non-zero if reschedule occurred
       
  2293 	asm("ldr pc, [sp], #4 ");
       
  2294 
       
  2295 	asm("__RescheduleNeededFlag: ");
       
  2296 	asm(".word %a0" : : "i" ((TInt)&TheScheduler.iRescheduleNeededFlag));
       
  2297 	}
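
// Usage sketch: a long operation performed with the kernel locked, offering
// preemption between items. ProcessOneItem and KItemCount are hypothetical.
//
// @code
// const TInt KItemCount = 128;				// hypothetical batch size
// void ProcessOneItem(TInt aIndex);		// hypothetical unit of work
//
// void ProcessAllItems()
// 	{
// 	NKern::Lock();
// 	for (TInt i=0; i<KItemCount; ++i)
// 		{
// 		ProcessOneItem(i);				// work done under the kernel lock
// 		NKern::PreemptionPoint();		// run pending IDFCs/reschedule if any;
// 										// returns with the kernel locked again
// 		}
// 	NKern::Unlock();
// 	}
// @endcode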
       
  2298 
       
  2299 
       
  2300 /**	Returns the current processor context type (thread, IDFC or interrupt).
       
  2301 
       
  2302 	@return	A value from NKern::TContext enumeration (but never EEscaped).
       
  2303 	
       
  2304 	@pre	Call in any context.
       
  2305 
       
  2306 	@see	NKern::TContext
       
  2307  */
       
  2308 EXPORT_C __NAKED__ TInt NKern::CurrentContext()
       
  2309 	{
       
  2310 	asm("mrs r1, cpsr ");
       
  2311 	asm("mov r0, #2 ");						// 2 = interrupt
       
  2312 	asm("and r1, r1, #0x1f ");				// r1 = mode
       
  2313 	asm("cmp r1, #0x13 ");
       
  2314 	asm("ldreq r2, __TheScheduler ");
       
  2315 	__JUMP(ne,lr);							// if not svc, must be interrupt
       
  2316 	asm("ldrb r0, [r2, #%a0]" : : "i" _FOFF(TScheduler,iInIDFC));
       
  2317 	asm("cmp r0, #0 ");
       
  2318 	asm("movne r0, #1 ");					// if iInIDFC, return 1 else return 0
       
  2319 	__JUMP(,lr);
       
  2320 	}
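
// Usage sketch: refusing to block outside thread context. Per the
// implementation above, the return value is 0 for a thread, 1 for an IDFC and
// 2 for an interrupt (the NKern::TContext enumeration).
//
// @code
// TBool CanBlockHere()
// 	{
// 	return NKern::CurrentContext() == 0;	// 0 = thread context, safe to wait
// 	}
// @endcode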
       
  2321 
       
  2322 
       
  2323 #ifdef __FAST_MUTEX_MACHINE_CODED__
       
  2324 
       
  2325 /** Temporarily releases the System Lock if there is contention.
       
  2326 
       
  2327     If there is another thread attempting to acquire the System lock, the calling
       
  2328 	thread releases the mutex and then acquires it again.
       
  2329 	
       
  2330 	
       
  2331 	This is more efficient than the equivalent code:
       
  2332 	
       
  2333 	@code
       
  2334 	NKern::UnlockSystem();
       
  2335 	NKern::LockSystem();
       
  2336 	@endcode
       
  2337 
       
  2338 	Note that this can only allow higher priority threads to use the System
       
  2339 	lock, as a lower priority thread cannot cause contention on a fast mutex.
       
  2340 
       
  2341 	@return	TRUE if the system lock was relinquished, FALSE if not.
       
  2342 
       
  2343 	@pre	System lock must be held.
       
  2344 
       
  2345 	@post	System lock is held.
       
  2346 
       
  2347 	@see NKern::LockSystem()
       
  2348 	@see NKern::UnlockSystem()
       
  2349 */
       
  2350 EXPORT_C __NAKED__ TBool NKern::FlashSystem()
       
  2351 	{
       
  2352 	asm("ldr r0, __SystemLock ");
       
  2353 	}
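
// Usage sketch: walking a long list under the System lock while letting any
// higher priority waiter in. NextObject is a hypothetical iteration helper;
// note that if FlashSystem() returns TRUE the lock was released and retaken,
// so iteration state may need to be revalidated.
//
// @code
// void VisitAllObjects()
// 	{
// 	NKern::LockSystem();
// 	for (TAny* p = NextObject(NULL); p; p = NextObject(p))
// 		{
// 		// ... examine the object under the System lock ...
// 		NKern::FlashSystem();		// releases/reacquires only if someone is waiting
// 		}
// 	NKern::UnlockSystem();
// 	}
// @endcode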
       
  2354 
       
  2355 
       
  2356 /** Temporarily releases a fast mutex if there is contention.
       
  2357 
       
  2358     If there is another thread attempting to acquire the mutex, the calling
       
  2359 	thread releases the mutex and then acquires it again.
       
  2360 	
       
  2361 	This is more efficient than the equivalent code:
       
  2362 	
       
  2363 	@code
       
  2364 	NKern::FMSignal();
       
  2365 	NKern::FMWait();
       
  2366 	@endcode
       
  2367 
       
  2368 	@return	TRUE if the mutex was relinquished, FALSE if not.
       
  2369 
       
  2370 	@pre	The mutex must be held.
       
  2371 
       
  2372 	@post	The mutex is held.
       
  2373 */
       
  2374 EXPORT_C __NAKED__ TBool NKern::FMFlash(NFastMutex*)
       
  2375 	{
       
  2376 	ASM_DEBUG1(NKFMFlash,r0);	
       
  2377 	
       
  2378 	asm("ldr r1, [r0,#%a0]" : : "i" _FOFF(NFastMutex,iWaiting));
       
  2379 	asm("cmp r1, #0");
       
  2380 	asm("bne fmflash_contended");
       
  2381 #ifdef BTRACE_FAST_MUTEX
       
  2382 	asm("ldr r1, __TheScheduler ");
       
  2383 	asm("ldrb r2, [r1,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
       
  2384 	asm("cmp r2, #0");
       
  2385 	asm("bne fmflash_trace");
       
  2386 #endif
       
  2387 	asm("mov r0, #0");
       
  2388 	__JUMP(,lr);
       
  2389 
       
  2390 	asm("fmflash_contended:");
       
  2391 	asm("stmfd sp!,{r4,lr}");
       
  2392 	asm("mov r4, r0");
       
  2393 	asm("bl " CSM_ZN5NKern4LockEv);
       
  2394 	asm("mov r0, r4");
       
  2395 	asm("bl " CSM_ZN10NFastMutex6SignalEv);
       
  2396 	asm("bl " CSM_ZN5NKern15PreemptionPointEv);
       
  2397 	asm("mov r0, r4");
       
  2398 	asm("bl " CSM_ZN10NFastMutex4WaitEv);
       
  2399 	asm("bl " CSM_ZN5NKern6UnlockEv);
       
  2400 	asm("mov r0, #-1");
       
  2401 	__POPRET("r4,");
       
  2402 
       
  2403 #ifdef BTRACE_FAST_MUTEX
       
  2404 	asm("fmflash_trace:");
       
  2405 	ALIGN_STACK_START;
       
  2406 	asm("stmdb sp!,{r0-r2,lr}");		// 4th item on stack is PC value for trace
       
  2407 	asm("mov r3, r0");					 // fast mutex parameter in r3
       
  2408 	asm("ldr r0, fmflash_trace_header"); // header parameter in r0
       
  2409 	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
       
  2410 	asm("mov lr, pc");
       
  2411 	asm("ldr pc, [r1, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));	
       
  2412 	asm("ldmia sp!,{r0-r2,lr}");
       
  2413 	ALIGN_STACK_END;
       
  2414 	asm("mov r0, #0");
       
  2415 	__JUMP(,lr);
       
  2416 
       
  2417 	asm("fmflash_trace_header:");
       
  2418 	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex<< BTrace::ECategoryIndex*8) + (BTrace::EFastMutexFlash << BTrace::ESubCategoryIndex*8)) );
       
  2419 #endif
       
  2420 	}
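
// Usage sketch: the same pattern for an arbitrary fast mutex. MoreWorkToDo and
// DoSomeWork are hypothetical helpers.
//
// @code
// void DrainWorkQueue(NFastMutex& aLock)
// 	{
// 	NKern::FMWait(&aLock);
// 	while (MoreWorkToDo())
// 		{
// 		DoSomeWork();				// work done while holding the fast mutex
// 		NKern::FMFlash(&aLock);		// give way only if another thread wants the mutex
// 		}
// 	NKern::FMSignal(&aLock);
// 	}
// @endcode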
       
  2421 #endif
       
  2422 
       
  2423 
       
  2424 // Need to put this code here because the H2 ekern build complains that the
       
  2425 // offset of the __TheScheduler label from the first function in the file
       
  2426 // falls outside the permissible range
       
  2427 #ifdef BTRACE_FAST_MUTEX
       
  2428 __NAKED__ TInt BtraceFastMutexHolder()
       
  2429 	{
       
  2430 	asm("fmsignal_lock_trace_header:");
       
  2431 	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex<< BTrace::ECategoryIndex*8) + (BTrace::EFastMutexSignal << BTrace::ESubCategoryIndex*8)) );
       
  2432 	
       
  2433 	asm("fmwait_lockacquired_trace_header:");
       
  2434 	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex << BTrace::ECategoryIndex*8) + (BTrace::EFastMutexWait << BTrace::ESubCategoryIndex*8)) );
       
  2435 	
       
  2436 	asm("fmsignal_lock_trace_unlock:");
       
  2437 	// r0=mutex r2=scheduler
       
  2438 	asm("ldr r12, [r2, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));	
       
  2439 	asm("mov r3, r0");													// mutex
       
  2440 	asm("ldr r0, fmsignal_lock_trace_header");							// header
       
  2441 	asm("ldr r2, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// context id
       
  2442 	__JUMP(,r12);
       
  2443 
       
  2444 	asm("fmwait_lockacquiredwait_trace:");
       
  2445 	// r0=scheduler r2=mutex r3=thread 
       
  2446 	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));	
       
  2447 	asm("mov r1, r2");
       
  2448 	asm("mov r2, r3");													// context id 
       
  2449 	asm("mov r3, r1");													// mutex
       
  2450 	asm("ldr r0, fmwait_lockacquired_trace_header");					// header 
       
  2451 	__JUMP(,r12);
       
  2452 
       
  2453 	asm("fmwait_lockacquiredwait_trace2:");
       
  2454 	// r0=mutex r1=thread r2=scheduler
       
  2455 	asm("ldr r12, [r2, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
       
  2456 	asm("mov r3, r0");													// mutex
       
  2457 	asm("ldr r0, fmwait_lockacquired_trace_header");					// header
       
  2458 	asm("mov r2, r1");													// context id 
       
  2459 	__JUMP(,r12);
       
  2460 	
       
  2461 	asm("syslock_wait_trace:");
       
  2462 	// r0=scheduler r2=thread
       
  2463 	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));	
       
  2464 //	asm("mov r2, r2");													// context id 
       
  2465 	asm("add r3, r0, #%a0" : : "i"  _FOFF(TScheduler,iLock));			// mutex
       
  2466 	asm("ldr r0, fmwait_lockacquired_trace_header");					// header 
       
  2467 	__JUMP(,r12);
       
  2468 
       
  2469 	asm("syslock_signal_trace:");
       
  2470 	// r0=scheduler r3=thread
       
  2471 	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));	
       
  2472 	asm("mov r2, r3");													// context id 
       
  2473 	asm("add r3, r0, #%a0" : : "i"  _FOFF(TScheduler,iLock));			// mutex
       
  2474 	asm("ldr r0, fmsignal_lock_trace_header");							// header
       
  2475 	__JUMP(,r12);
       
  2476 
       
  2477 	}
       
  2478 #endif // BTRACE_FAST_MUTEX