kernel/eka/nkernsmp/arm/ncsched.cia
       
     1 // Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
       
     2 // All rights reserved.
       
     3 // This component and the accompanying materials are made available
       
     4 // under the terms of the License "Eclipse Public License v1.0"
       
     5 // which accompanies this distribution, and is available
       
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     7 //
       
     8 // Initial Contributors:
       
     9 // Nokia Corporation - initial contribution.
       
    10 //
       
    11 // Contributors:
       
    12 //
       
    13 // Description:
       
    14 // e32\nkernsmp\arm\ncsched.cia
       
    15 // 
       
    16 //
       
    17 
       
    18 // NThreadBase member data
       
    19 #define __INCLUDE_NTHREADBASE_DEFINES__
       
    20 
       
    21 // TDfc member data
       
    22 #define __INCLUDE_TDFC_DEFINES__
       
    23 
       
    24 #include <e32cia.h>
       
    25 #include <arm.h>
       
    26 #include "nkern.h"
       
    27 #include <arm_gic.h>
       
    28 #include <arm_scu.h>
       
    29 #include <arm_tmr.h>
       
    30 //#include <highrestimer.h>
       
    31 //#include "emievents.h"
       
    32 
       
    33 #ifdef _DEBUG
       
    34 #define ASM_KILL_LINK(rp,rs)	asm("mov "#rs", #0xdf ");\
       
    35 								asm("orr "#rs", "#rs", "#rs", lsl #8 ");\
       
    36 								asm("orr "#rs", "#rs", "#rs", lsl #16 ");\
       
    37 								asm("str "#rs", ["#rp"] ");\
       
    38 								asm("str "#rs", ["#rp", #4] ");
       
    39 #else
       
    40 #define ASM_KILL_LINK(rp,rs)
       
    41 #endif
       
    42 
       
    43 #define ALIGN_STACK_START			\
       
    44 	asm("mov r12, sp");				\
       
    45 	asm("tst sp, #4");				\
       
    46 	asm("subeq sp, sp, #4");		\
       
    47 	asm("str r12, [sp,#-4]!")
       
    48 
       
    49 #define ALIGN_STACK_END				\
       
    50 	asm("ldr sp, [sp]")
       
    51 
       
    52 
       
    53 //#define __DEBUG_BAD_ADDR
       
    54 
       
    55 extern "C" void NewThreadTrace(NThread* a);
       
    56 extern "C" void send_accumulated_resched_ipis();
       
    57 
       
    58 
       
    59 __NAKED__ void TScheduler::Reschedule()
       
    60 	{
       
    61 	//
       
    62 	// Enter in mode_svc with kernel locked, interrupts can be on or off
       
    63 	// Exit in mode_svc with kernel unlocked, interrupts off
       
    64 	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
       
    65 	// NOTE: R4-R11 are modified
       
    66 	//
       
    67 	asm("mov	r2, sp ");					// bit 0 will be reschedule flag
       
    68 	asm("bic	sp, sp, #4 ");				// align stack
       
    69 	GET_RWNO_TID(,r0)						// r0->TSubScheduler
       
    70 	asm("stmfd	sp!, {r2,lr} ");			// save original SP/resched flag, return address
       
    71 	__ASM_CLI();							// interrupts off
       
    72 	asm("ldr	r1, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iDfcPendingFlag)&~3));	// check iDfcPendingFlag and iExIDfcPendingFlag
       
    73 	asm("mov	r11, r0 ");					// r11->TSubScheduler
       
    74 	asm("ldr	r10, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));	// r10->CPU local timer
       
    75 
       
    76 	asm("start_resched: ");
       
    77 	asm("movs	r1, r1, lsr #16 ");			// check if IDFCs or ExIDFCs pending
       
    78 
       
    79 	asm("blne "	CSM_ZN13TSubScheduler9QueueDfcsEv);		// queue any pending DFCs
       
    80 	asm("ldrb	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
       
    81 	asm("ldr	r3, [sp, #0] ");
       
    82 	asm("mrs	r2, spsr ");				// r2=spsr_svc
       
    83 	asm("cmp	r1, #0 ");					// check if a reschedule is required
       
    84 	asm("beq	no_resched_needed ");		// branch out if not
       
    85 	__ASM_STI();							// interrupts back on
       
    86 	asm("orr	r3, r3, #1 ");
       
    87 	asm("str	r3, [sp, #0] ");			// set resched flag
       
    88 	asm("stmfd	sp!, {r0,r2} ");			// store SPSR_SVC
       
    89 	asm("ldr	r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
       
    90 #ifdef __CPU_ARMV7
       
    91 	asm("mrc	p14, 6, r7, c1, c0, 0 ");	// r7 = TEEHBR
       
    92 #else
       
    93 	asm("mov	r7, #0 ");
       
    94 #endif
       
    95 	GET_RWRO_TID(,r8);						// r8 = User RO Thread ID
       
    96 	GET_RWRW_TID(,r9);						// r9 = User RW Thread ID
       
    97 #ifdef __CPU_HAS_VFP
       
    98 	VFP_FMRX(,0,VFP_XREG_FPEXC);			// r0 = FPEXC
       
    99 	asm("bic r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); // Store FPEXC with VFP disabled in case this thread runs on a different core next time
       
   100 #else
       
   101 	asm("mov	r0, #0 ");
       
   102 #endif
       
   103 	GET_CAR(,	r1);						// r1 = CAR
       
   104 	asm("mrc	p15, 0, r12, c3, c0, 0 ");	// r12 = DACR
       
   105 	asm("ldr	r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
       
   106 
       
   107 	// Save auxiliary registers
       
   108 	// R0=FPEXC, R1=CAR, R7=TEEHBR, R8=RWROTID, R9=RWRWTID, R12=DACR
       
   109 	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadReschedStack,iSpare));
       
   110 	asm("str	sp, [r5, #%a0]" : : "i" _FOFF(NThread,iSavedSP));	// store original thread's stack pointer
       
   111 	asm("stmia	sp, {r0-r1,r7-r9,r12} ");
       
   112 
       
   113 	// We must move to a temporary stack before selecting the next thread.
       
   114 	// This is because another CPU may begin executing this thread before the
       
   115 	// select_next_thread() function returns and our stack would then be
       
   116 	// corrupted. We use the stack belonging to this CPU's initial thread since
       
    117 	// we are guaranteed that it will never run on another CPU.
       
   118 	asm("ldr	sp, [r4, #%a0]" : : "i" _FOFF(NThread,iSavedSP));
       
   119 
       
   120 	asm("select_thread: ");
       
   121 	asm("mov	r0, r11 ");
       
   122 	asm("bl "	CSM_ZN13TSubScheduler16SelectNextThreadEv );	// also sets r0->iCurrentThread
       
   123 #ifdef BTRACE_CPU_USAGE
       
   124 	asm("ldr	r2, __BTraceFilter ");
       
   125 #endif
       
   126 	asm("movs	r3, r0 ");					// r3 = new thread (might be 0)
       
   127 	asm("ldrne	sp, [r0, #%a0]" : : "i" _FOFF(NThread,iSavedSP));	// if a thread has been selected, move to its stack
       
   128 	asm("beq	no_thread ");				// branch out if no thread is ready
       
   129 
       
   130 #ifdef BTRACE_CPU_USAGE
       
   131 	asm("ldrb	r1, [r2, #4] ");			// check category 4 trace
       
   132 	asm("cmp	r1, #0 ");
       
   133 	asm("beq	1f ");
       
   134 	asm("stmfd	sp!, {r0-r3} ");
       
   135 	asm("bl		NewThreadTrace ");
       
   136 	asm("ldmfd	sp!, {r0-r3} ");
       
   137 	asm("1: ");
       
   138 #endif	// BTRACE_CPU_USAGE
       
   139 
       
   140 	asm("cmp	r3, r5 ");					// same thread?
       
   141 	asm("beq	same_thread ");
       
   142 	asm("ldrb	r1, [r3, #%a0]" : : "i" _FOFF(NThreadBase, i_ThrdAttr));
       
   143 	asm("ldr	r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler, iScheduler));
       
   144 	asm("mov	r2, r3, lsr #6 ");			// r2=current thread>>6
       
   145 	asm("tst	r1, #%a0" : : "i" ((TInt)KThreadAttAddressSpace));	// address space required?
       
   146 	asm("ldrne	r4, [r4, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler));	// if so, get pointer to process handler
       
   147 
       
   148 	// we are doing a thread switch so restore new thread's auxiliary registers
       
   149 	// R0=FPEXC, R1=CAR, R7=TEEHBR, R8=RWROTID, R9=RWRWTID, R12=DACR
       
   150 	asm("ldmia	sp, {r0-r1,r7-r9,r12} ");
       
   151 
       
   152 #ifdef __CPU_ARMV7
       
   153 	asm("mcr	p14, 6, r7, c1, c0, 0 ");	// r7 = TEEHBR
       
   154 #endif
       
   155 	SET_RWRO_TID(,r8);						// r8 = User RO Thread ID
       
   156 	SET_RWRW_TID(,r9);						// r9 = User RW Thread ID
       
   157 #ifdef __CPU_HAS_VFP
       
   158 	VFP_FMXR(,VFP_XREG_FPEXC,0);			// r0 = FPEXC
       
   159 #endif
       
   160 	SET_CAR(,	r1);						// r1 = CAR
       
   161 	asm("mcr	p15, 0, r12, c3, c0, 0 ");	// r12 = DACR
       
   162 
       
   163 	asm("beq	no_as_switch ");			// skip if address space change not required
       
   164 
       
   165 	// Do address space switching
       
   166 	// Handler called with:
       
   167 	// r11->subscheduler, r3->current thread
       
   168 	// r9->new address space, r5->old address space
       
   169 	// Must preserve r10,r11,r3, can modify other registers
       
   170 	asm("ldr	r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iAddressSpace));	// get current address space ptr
       
   171 	asm("ldr	r9, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iAddressSpace));		// get new address space ptr
       
   172 	asm("adr	lr, as_switch_return ");
       
   173 	__JUMP(,	r4);
       
   174 
       
   175 	asm("no_as_switch: ");
       
   176 	asm("mrc	p15, 0, r4, c13, c0, 1 ");	// r4 = CONTEXTID (threadID:ASID)
       
   177 	asm("and	r4, r4, #0xff ");			// isolate ASID
       
   178 	asm("orr	r2, r4, r2, lsl #8 ");		// r2 = new ContextID (new thread ID : ASID)
       
   179 	__DATA_SYNC_BARRIER_Z__(r12);			// needed before change to ContextID
       
   180 	asm("mcr	p15, 0, r2, c13, c0, 1 ");	// set ContextID (ASID + debugging thread ID)
       
   181 	__INST_SYNC_BARRIER__(r12);
       
   182 #ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
       
   183 	asm("mcr	p15, 0, r12, c7, c5, 6 ");	// flush BTAC
       
   184 #endif
       
   185 
       
   186 	asm("as_switch_return: ");
       
   187 	asm("same_thread: ");
       
   188 	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadReschedStack,iSpare));	// step past auxiliary registers
       
   189  	asm("ldmib	sp!, {r2,r12} ");			// r2=SPSR_SVC, r12=original SP + resched flag
       
   190 	__ASM_CLI();							// interrupts off
       
   191 	asm("ldr	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
       
   192 	asm("msr	spsr, r2 ");				// restore spsr_svc
       
   193 	asm("mov	r0, r11 ");
       
   194 	asm("mov	r2, r12 ");					// r2 = original SP + reschedule flag
       
   195 	asm("cmp	r1, #0 ");					// check for more IDFCs and/or another reschedule
       
   196 	asm("bne	start_resched ");			// loop if required
       
   197 	asm("ldr	r14, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
       
   198 	asm("ldr	r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
       
   199 	asm("str	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
       
   200 	asm("cmp	r14, #%a0" : : "i" ((TInt)NThreadBase::ECSDivertPending));
       
   201 	asm("ldr	lr, [sp, #4] ");			// restore R10, R11, return address
       
   202 	asm("bic	sp, r2, #3 ");				// restore initial unaligned stack pointer
       
   203 	asm("and	r2, r2, #1 ");				// r2 = reschedule flag
       
   204 	asm("beq	resched_thread_divert ");
       
   205 
       
   206 	// Return with:	R0=&SubScheduler, R1=0, R2=TRUE if reschedule occurred, R3=iCurrentThread
       
   207 	//				R12=iReschedIPIs
       
   208 	__JUMP(,	lr);
       
   209 
       
   210 	asm("no_resched_needed: ");
       
   211 	asm("ldr	r3, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
       
   212 	asm("ldr	r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
       
   213 	asm("mov	r0, r11 ");
       
   214 	asm("str	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
       
   215 	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
       
   216 	asm("cmp	r2, #%a0" : : "i" ((TInt)NThreadBase::ECSDivertPending));
       
   217 	asm("ldmfd	sp, {r2,lr} ");				// r2 = original SP + reschedule flag, restore lr
       
   218 	asm("bic	sp, r2, #3 ");				// restore initial unaligned stack pointer
       
   219 	asm("and	r2, r2, #1 ");				// r2 = reschedule flag
       
   220 	asm("beq	resched_thread_divert ");
       
   221 
       
   222 	// Return with:	R0=&SubScheduler, R1=0, R2=TRUE if reschedule occurred, R3=iCurrentThread
       
   223 	//				R12=iReschedIPIs
       
   224 	__JUMP(,	lr);
       
   225 
       
   226 	asm("resched_thread_divert: ");
       
   227 	asm("mov	r1, #1 ");
       
   228 	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
       
   229 	asm("bic	sp, sp, #4 ");				// align stack
       
   230 	asm("stmfd	sp!, {r0-r5,r12,lr} ");		// save registers for diagnostic purposes
       
   231 	asm("mov	r4, r3 ");					// don't really need to bother about registers since thread is exiting
       
   232 
       
   233 	// need to send any outstanding reschedule IPIs
       
   234 	asm("cmp	r12, #0 ");
       
   235 	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
       
   236 
       
   237 	__ASM_STI();
       
   238 	asm("ldrb	r1, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iFastMutexDefer));
       
   239 	asm("cmp	r1, #1 ");
       
   240 	asm("bne	1f ");
       
   241 	__ASM_CRASH();
       
   242 	asm("1: ");
       
   243 	asm("mov	r2, #0 ");
       
   244 	asm("strb	r2, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iFastMutexDefer));
       
   245 	asm("mov	r0, r4 ");
       
   246 	asm("bl "	CSM_ZN11NThreadBase4ExitEv );
       
   247 	__ASM_CRASH();	// shouldn't get here
       
   248 
       
   249 	// There is no thread ready to run
       
   250 	// R11->TSubScheduler, R1=unknown, R2=0, R3=__BTraceFilter, R12=unknown
       
   251 	asm("no_thread: ");
       
   252 	__ASM_CLI();
       
   253 	asm("ldr	r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
       
   254 	asm("mov	r0, r11 ");
       
   255 	asm("cmp	r12, #0 ");
       
   256 	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
       
   257 	__ASM_STI();
       
   258 	__DATA_SYNC_BARRIER_Z__(r1);
       
   259 	ARM_WFI;
       
   260 	asm("no_thread2: ");
       
   261 	asm("ldr	r1, [r11, #%a0]" : : "i" (_FOFF(TSubScheduler,iDfcPendingFlag)&~3));	// check iDfcPendingFlag and iExIDfcPendingFlag
       
   262 	asm("mov	r0, r11 ");
       
   263 	asm("movs	r1, r1, lsr #16 ");
       
   264 	asm("beq	no_thread ");
       
   265 	asm("bl "	CSM_ZN13TSubScheduler9QueueDfcsEv);		// queue any pending DFCs
       
   266 	asm("ldrb	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
       
   267 	asm("cmp	r1, #0 ");					// check if a reschedule is required
       
   268 	asm("beq	no_thread2 ");
       
   269 	asm("b		select_thread ");
       
   270 
       
   271 
       
   272 
       
   273 /******************************************************************************
       
   274  Missed out stuff:
       
   275 	EMI EVENT LOGGING
       
   276 	__CPU_ARM1136_ERRATUM_351912_FIXED
       
   277 	Debug hooks in the scheduler
       
   278  ******************************************************************************/
       
   279 
       
   280 	asm("__BTraceFilter: ");
       
   281 	asm(".word %a0 " : : "i" ((TInt)&BTraceData.iFilter[0]));
       
   282 	};
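
// Illustrative sketch (not part of the original source): how the no_as_switch path
// above builds the new CONTEXTIDR value. The 8-bit ASID already in the register is
// kept, and the new thread pointer, shifted right by 6 as in the assembler, is
// placed in bits 31:8 purely as a debugger-visible thread identifier.
static inline TUint32 ComposeContextId(TUint32 aOldContextId, TLinAddr aNewThread)
	{
	TUint32 asid = aOldContextId & 0xffu;		// isolate ASID
	return asid | ((aNewThread >> 6) << 8);		// new thread ID : ASID
	}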
       
   283 
       
   284 
       
   285 /** 
       
   286  * Returns the range of linear memory which inserting the scheduler hooks needs to modify.
       
   287  * 
       
   288  * @param aStart Set to the lowest memory address which needs to be modified.
       
   289  * @param aEnd   Set to the highest memory address +1 which needs to be modified.
       
   290 
       
   291  @pre	Kernel must be locked.
       
   292  @pre	Call in a thread context.
       
   293  @pre	Interrupts must be enabled.
       
   294  */
       
   295 EXPORT_C __NAKED__ void NKern::SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd)
       
   296 	{
       
   297 #if 0
       
   298 	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
       
   299 #ifdef __DEBUGGER_SUPPORT__
       
   300 	asm("adr r2,resched_trampoline_hook_address");
       
   301 	asm("str r2,[r0]");
       
   302 	asm("adr r2,resched_trampoline_hook_address+4");
       
   303 	asm("str r2,[r1]");
       
   304 #else
       
   305 	asm("mov r2,#0");
       
   306 	asm("str r2,[r0]");
       
   307 	asm("str r2,[r1]");
       
   308 #endif
       
   309 #endif
       
   310 	__JUMP(,lr);
       
   311 	};
       
   312 
       
   313 
       
   314 /** 
       
   315  * Modifies the scheduler code so that it can call the function set by
       
   316  * NKern::SetRescheduleCallback().
       
   317  *
       
   318  * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.
       
   319 
       
   320  @pre	Kernel must be locked.
       
   321  @pre	Call in a thread context.
       
   322  @pre	Interrupts must be enabled.
       
   323  */
       
   324 EXPORT_C __NAKED__ void NKern::InsertSchedulerHooks()
       
   325 	{
       
   326 #if 0
       
   327 	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
       
   328 #ifdef __DEBUGGER_SUPPORT__
       
   329 	asm("adr r0,resched_trampoline_hook_address");
       
   330 	asm("adr r1,resched_trampoline");
       
   331 	asm("sub r1, r1, r0");
       
   332 	asm("sub r1, r1, #8");
       
   333 	asm("mov r1, r1, asr #2");
       
   334 	asm("add r1, r1, #0xea000000");  // r1 = a branch instruction from resched_trampoline_hook_address to resched_trampoline
       
   335 
       
   336 #if defined(__CPU_MEMORY_TYPE_REMAPPING)
       
   337 	// These platforms have shadow memory in non-writable page. We cannot use the standard
       
   338 	// Epoc::CopyToShadowMemory interface as we hold Kernel lock here.
       
   339 	// Instead, we'll temporarily disable access permission checking in MMU by switching
       
   340 	// domain#0 into Manager Mode (see Domain Access Control Register).
       
   341 	asm("mrs r12, CPSR ");				// save cpsr setting and ...
       
   342 	CPSIDAIF;							// ...disable interrupts
       
   343 	asm("mrc p15, 0, r2, c3, c0, 0 ");	// read DACR
       
   344 	asm("orr r3, r2, #3");				// domain #0 is the first two bits. manager mode is 11b
       
   345 	asm("mcr p15, 0, r3, c3, c0, 0 ");	// write DACR
       
   346 	asm("str r1,[r0]");
       
   347 	asm("mcr p15, 0, r2, c3, c0, 0 ");	// write back the original value of DACR
       
   348 	asm("msr CPSR_cxsf, r12 "); 		// restore cpsr setting (re-enable interrupts)
       
   349 #else
       
   350 	asm("str r1,[r0]");
       
   351 #endif
       
   352 
       
   353 #endif
       
   354 #endif
       
   355 	__JUMP(,lr);
       
   356 	};
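
// Illustrative sketch (not part of the original source): the DACR manipulation used
// by the __CPU_MEMORY_TYPE_REMAPPING path above. Each MMU domain is a 2-bit field in
// the Domain Access Control Register; forcing domain #0 (bits 1:0) to manager mode
// (11b) bypasses permission checks while the hook word is patched, after which the
// saved DACR value is written back.
static inline TUint32 DacrWithDomain0Manager(TUint32 aDacr)
	{
	return aDacr | 0x3u;		// domain #0 -> manager mode
	}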
       
   357 
       
   358 
       
   359 /** 
       
   360  * Reverts the modification of the Scheduler code performed by NKern::InsertSchedulerHooks()
       
   361  *
       
   362  * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.
       
   363 
       
   364  @pre	Kernel must be locked.
       
   365  @pre	Call in a thread context.
       
   366  @pre	Interrupts must be enabled.
       
   367  */
       
   368 EXPORT_C __NAKED__ void NKern::RemoveSchedulerHooks()
       
   369 	{
       
   370 #if 0
       
   371 	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
       
   372 #ifdef __DEBUGGER_SUPPORT__
       
   373 	asm("adr r0,resched_trampoline_hook_address");
       
   374 	asm("ldr r1,resched_trampoline_unhook_data");
       
   375 
       
   376 #if defined(__CPU_MEMORY_TYPE_REMAPPING)
       
   377 	// See comments above in InsertSchedulerHooks
       
   378 	asm("mrs r12, CPSR ");				// save cpsr setting and ...
       
   379 	CPSIDAIF;							// ...disable interrupts
       
   380 	asm("mrc p15, 0, r2, c3, c0, 0 ");	// read DACR
       
   381 	asm("orr r3, r2, #3");				// domain #0 is the first two bits. manager mode is 11b
       
   382 	asm("mcr p15, 0, r3, c3, c0, 0 ");	// write DACR
       
   383 	asm("str r1,[r0]");
       
   384 	asm("mcr p15, 0, r2, c3, c0, 0 ");	// write back the original value of DACR
       
   385 	asm("msr CPSR_cxsf, r12 "); 		// restore cpsr setting (re-enable interrupts)
       
   386 #else
       
   387 	asm("str r1,[r0]");
       
   388 #endif
       
   389 
       
   390 #endif
       
   391 #endif
       
   392 	__JUMP(,lr);
       
   393 	};
       
   394 
       
   395 
       
   396 /** 
       
   397  * Set the function which is to be called on every thread reschedule.
       
   398  *
       
   399  * @param aCallback  Pointer to callback function, or NULL to disable callback.
       
   400 
       
   401  @pre	Kernel must be locked.
       
   402  @pre	Call in a thread context.
       
   403  @pre	Interrupts must be enabled.
       
   404  */
       
   405 EXPORT_C __NAKED__ void NKern::SetRescheduleCallback(TRescheduleCallback /*aCallback*/)
       
   406 	{
       
   407 #if 0
       
   408 	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
       
   409 #ifdef __DEBUGGER_SUPPORT__
       
   410 	asm("ldr r1, __TheScheduler ");
       
   411 	asm("str r0, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleHook));
       
   412 #endif
       
   413 #endif
       
   414 	__JUMP(,lr);
       
   415 	};
       
   416 
       
   417 
       
   418 
       
    419 /** Disables interrupts to the specified level.
       
   420 
       
   421 	Note that if we are not disabling all interrupts we must lock the kernel
       
   422 	here, otherwise a high priority interrupt which is still enabled could
       
   423 	cause a reschedule and the new thread could then reenable interrupts.
       
   424 
       
    425 	@param  aLevel Interrupts are disabled up to and including aLevel.  On ARM,
       
   426 			level 1 stands for IRQ only and level 2 stands for IRQ and FIQ.
       
   427 	@return CPU-specific value passed to RestoreInterrupts.
       
   428 
       
   429 	@pre 1 <= aLevel <= maximum level (CPU-specific)
       
   430 
       
   431 	@see NKern::RestoreInterrupts()
       
   432  */
       
   433 EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/)
       
   434 	{
       
   435 #ifdef __FIQ_IS_UNCONTROLLED__
       
   436 	asm("mrs	r1, cpsr ");
       
   437 	asm("cmp	r0, #0 ");
       
   438 	asm("beq	1f ");
       
   439 	__ASM_CLI();
       
   440 	asm("1: ");
       
   441 	asm("and	r0, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
       
   442 	__JUMP(,	lr);
       
   443 #else
       
   444 	asm("cmp	r0, #1 ");
       
   445 	asm("bhi "	CSM_ZN5NKern20DisableAllInterruptsEv);	// if level>1, disable all
       
   446 	asm("mrs	r2, cpsr ");			// r2=original CPSR
       
   447 	asm("bcc	1f ");					// skip if level=0
       
   448 	__ASM_CLI();						// Disable all interrupts to prevent migration
       
   449 	GET_RWNO_TID(,r12);					// r12 -> TSubScheduler
       
   450 	asm("ldr	r3, [r12, #%a0]!" : : "i" _FOFF(TSubScheduler,iKernLockCount));
       
   451 	asm("and	r0, r2, #0xc0 ");
       
   452 	asm("cmp	r3, #0 ");				// test if kernel locked
       
   453 	asm("addeq	r3, r3, #1 ");			// if not, lock the kernel
       
   454 	asm("streq	r3, [r12] ");
       
   455 	asm("orreq	r0, r0, #0x80000000 ");	// and set top bit to indicate kernel locked
       
   456 	__ASM_STI2();						// reenable FIQs only
       
   457 	__JUMP(,	lr);
       
   458 	asm("1: ");
       
   459 	asm("and	r0, r2, #0xc0 ");
       
   460 	__JUMP(,	lr);
       
   461 #endif
       
   462 	}
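
// Usage sketch (illustrative only; ExampleIrqOffRegion is hypothetical): the value
// returned by DisableInterrupts() must be passed back to RestoreInterrupts(), since
// its top bit records whether the kernel had to be locked here and therefore must be
// unlocked again on restore.
static void ExampleIrqOffRegion()
	{
	TInt irq = NKern::DisableInterrupts(1);		// level 1 = IRQs off, FIQs still enabled
	// ... code that must not be preempted by IRQ-level interrupts ...
	NKern::RestoreInterrupts(irq);				// re-enables IRQs, unlocks kernel if needed
	}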
       
   463 
       
   464 
       
   465 /** Disables all interrupts (e.g. both IRQ and FIQ on ARM). 
       
   466 
       
   467 	@return CPU-specific value passed to NKern::RestoreInterrupts().
       
   468 
       
   469 	@see NKern::RestoreInterrupts()
       
   470  */
       
   471 EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts()
       
   472 	{
       
   473 	asm("mrs r1, cpsr ");
       
   474 	asm("and r0, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
       
   475 	__ASM_CLI();
       
   476 	__JUMP(,lr);
       
   477 	}
       
   478 
       
   479 
       
   480 /** Enables all interrupts (e.g. IRQ and FIQ on ARM).
       
   481 
       
   482 	This function never unlocks the kernel.  So it must be used
       
   483 	only to complement NKern::DisableAllInterrupts. Never use it
       
   484 	to complement NKern::DisableInterrupts.
       
   485 
       
   486 	@see NKern::DisableInterrupts()
       
   487 	@see NKern::DisableAllInterrupts()
       
   488 
       
   489 	@internalComponent
       
   490  */
       
   491 EXPORT_C __NAKED__ void NKern::EnableAllInterrupts()
       
   492 	{
       
   493 	__ASM_STI();
       
   494 	__JUMP(,lr);
       
   495 	}
       
   496 
       
   497 
       
   498 /** Restores interrupts to previous level and unlocks the kernel if it was 
       
   499 	locked when disabling them.
       
   500 
       
   501 	@param 	aRestoreData CPU-specific data returned from NKern::DisableInterrupts
       
   502 			or NKern::DisableAllInterrupts specifying the previous interrupt level.
       
   503 
       
   504 	@see NKern::DisableInterrupts()
       
   505 	@see NKern::DisableAllInterrupts()
       
   506  */
       
   507 EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt /*aRestoreData*/)
       
   508 	{
       
   509 	asm("tst r0, r0 ");					// test state of top bit of aLevel
       
   510 	asm("mrs r1, cpsr ");
       
   511 	asm("and r0, r0, #%a0" : : "i" ((TInt)KAllInterruptsMask));
       
   512 	asm("bic r1, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask));
       
   513 	asm("orr r1, r1, r0 ");				// replace I and F bits with those supplied
       
   514 	asm("msr cpsr_c, r1 ");				// flags are unchanged (in particular N)
       
   515 	__JUMP(pl,lr);						// if top bit of aLevel clear, finished
       
   516 
       
   517 	// if top bit of aLevel set, fall through to unlock the kernel
       
   518 	}
       
   519 
       
   520 
       
   521 /**	Unlocks the kernel.
       
   522 
       
    523 	Decrements iKernLockCount for the current CPU; if it becomes zero and IDFCs
       
    524 	or a reschedule are pending, calls the scheduler to process them.
       
   525 	Must be called in mode_svc.
       
   526 
       
   527     @pre    Call either in a thread or an IDFC context.
       
   528     @pre    Do not call from an ISR.
       
   529  */
       
   530 EXPORT_C __NAKED__ void NKern::Unlock()
       
   531 	{
       
   532 	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
       
   533 
       
   534 	GET_RWNO_TID(,r0)						// r0=&SubScheduler()
       
   535 	__ASM_CLI();							// interrupts off
       
   536 	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
       
   537 	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
       
   538 	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
       
   539 	asm("subs	r3, r1, #1 ");
       
   540 	asm("strne	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
       
   541 	asm("bne	0f ");						// kernel still locked -> return
       
   542 	asm("cmp	r2, #0 ");					// check for DFCs or reschedule
       
   543 	asm("bne	1f ");
       
   544 	asm("cmp	r12, #0 ");					// IPIs outstanding?
       
   545 	asm("str	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));	// unlock the kernel
       
   546 	asm("bne	2f ");
       
   547 	asm("0: ");
       
   548 	__ASM_STI();							// interrupts back on
       
   549 	__JUMP(,lr);
       
   550 
       
   551 	// need to run IDFCs and/or reschedule
       
   552 	asm("1: ");
       
   553 	asm("stmfd	sp!, {r0,r4-r11,lr} ");
       
   554 	asm("bl "	CSM_ZN10TScheduler10RescheduleEv );
       
   555 	asm(".global nkern_unlock_resched_return ");
       
   556 	asm("nkern_unlock_resched_return: ");
       
   557 
       
   558 	// need to send any outstanding reschedule IPIs
       
   559 	asm("cmp	r12, #0 ");
       
   560 	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
       
   561 	asm("ldmfd	sp!, {r0,r4-r11,lr} ");
       
   562 	__ASM_STI();
       
   563 	__JUMP(,lr);
       
   564 
       
   565 	asm("2:		");
       
   566 	asm("stmfd	sp!, {r0,lr} ");
       
   567 	asm("bl "	CSM_CFUNC(send_accumulated_resched_ipis));
       
   568 	asm("ldmfd	sp!, {r0,lr} ");
       
   569 	__ASM_STI();
       
   570 	__JUMP(,lr);
       
   571 	}
       
   572 
       
   573 
       
   574 /**	Locks the kernel.
       
   575 
       
   576 	Increments iKernLockCount for the current CPU, thereby deferring IDFCs
       
   577 	and preemption.	Must be called in mode_svc.
       
   578 
       
   579     @pre    Call either in a thread or an IDFC context.
       
   580     @pre    Do not call from an ISR.
       
   581  */
       
   582 EXPORT_C __NAKED__ void NKern::Lock()
       
   583 	{
       
   584 	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
       
   585 
       
   586 	__ASM_CLI();
       
   587 	GET_RWNO_TID(,r12);
       
   588 	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
       
   589 	asm("add r3, r3, #1 ");			// lock the kernel
       
   590 	asm("str r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
       
   591 	__ASM_STI();
       
   592 	__JUMP(,lr);
       
   593 	}
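
// Usage sketch (illustrative only; ExampleKernelLockedRegion is hypothetical):
// Lock()/Unlock() calls must be strictly nested. The final Unlock() on a CPU is the
// point at which IDFCs queued while the kernel was locked are run and any pending
// reschedule is allowed to take place.
static void ExampleKernelLockedRegion()
	{
	NKern::Lock();			// defer IDFCs and preemption on this CPU
	// ... manipulate state that requires the kernel lock ...
	NKern::Unlock();		// may run pending IDFCs and reschedule before returning
	}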
       
   594 
       
   595 
       
   596 /**	Locks the kernel and returns a pointer to the current thread.
       
   597 
       
   598 	Increments iKernLockCount for the current CPU, thereby deferring IDFCs
       
   599 	and preemption.	Must be called in mode_svc.
       
   600 
       
   601     @pre    Call either in a thread or an IDFC context.
       
   602     @pre    Do not call from an ISR.
       
   603  */
       
   604 EXPORT_C __NAKED__ NThread* NKern::LockC()
       
   605 	{
       
   606 	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
       
   607 
       
   608 	__ASM_CLI();
       
   609 	GET_RWNO_TID(,r12);
       
   610 	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
       
   611 	asm("ldr r0, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
       
   612 	asm("add r3, r3, #1 ");			// lock the kernel
       
   613 	asm("str r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
       
   614 	__ASM_STI();
       
   615 	__JUMP(,lr);
       
   616 	}
       
   617 
       
   618 
       
   619 /**	Allows IDFCs and rescheduling if they are pending.
       
   620 
       
    621 	If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1,
       
   622 	calls the scheduler to process the IDFCs and possibly reschedule.
       
   623 	Must be called in mode_svc.
       
   624 
       
   625 	@return	Nonzero if a reschedule actually occurred, zero if not.
       
   626 
       
   627     @pre    Call either in a thread or an IDFC context.
       
   628     @pre    Do not call from an ISR.
       
   629  */
       
   630 EXPORT_C __NAKED__ TInt NKern::PreemptionPoint()
       
   631 	{
       
   632 	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
       
   633 
       
   634 	GET_RWNO_TID(,r0)						// r0=&SubScheduler()
       
   635 	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
       
   636 	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
       
   637 	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
       
   638 	asm("cmp	r1, #1 ");
       
   639 	asm("bgt	0f ");						// if locked more than once return FALSE
       
   640 	asm("cmp	r2, #0 ");					// locked once and IDFCs/reschedule pending?
       
   641 	asm("bne	1f ");						// skip if so
       
   642 	asm("cmp	r12, #0 ");					// locked once and resched IPIs outstanding?
       
   643 	asm("bne	2f ");						// skip if so
       
   644 	asm("0:		");
       
   645 	asm("mov	r0, #0 ");
       
   646 	__JUMP(,	lr);						// else return FALSE
       
   647 
       
   648 	// need to run IDFCs and/or reschedule
       
   649 	asm("1:		");
       
   650 	asm("stmfd	sp!, {r1,r4-r11,lr} ");
       
   651 	asm("bl "	CSM_ZN10TScheduler10RescheduleEv );
       
   652 	asm(".global nkern_preemption_point_resched_return ");
       
   653 	asm("nkern_preemption_point_resched_return: ");
       
   654 	asm("str	r2, [sp] ");
       
   655 	asm("mov	r2, #1 ");
       
   656 	asm("str	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
       
   657 
       
   658 	// need to send any outstanding reschedule IPIs
       
   659 	asm("cmp	r12, #0 ");
       
   660 	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
       
   661 	asm("ldmfd	sp!, {r0,r4-r11,lr} ");		// return TRUE if reschedule occurred
       
   662 	__ASM_STI();
       
   663 	__JUMP(,	lr);
       
   664 
       
   665 	asm("2:		");
       
   666 	asm("stmfd	sp!, {r2,lr} ");
       
   667 	asm("bl "	CSM_CFUNC(send_accumulated_resched_ipis));
       
   668 	asm("ldmfd	sp!, {r0,lr} ");			// return TRUE if reschedule occurred
       
   669 	__ASM_STI();
       
   670 	__JUMP(,	lr);
       
   671 	}
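
// Usage sketch (illustrative only; ExampleLongKernelLockedLoop is hypothetical): a
// long operation performed with the kernel locked can call PreemptionPoint() on each
// iteration so that pending IDFCs and reschedules are not deferred for the whole loop.
static void ExampleLongKernelLockedLoop(TInt aCount)
	{
	NKern::Lock();
	for (TInt i=0; i<aCount; ++i)
		{
		// ... one step of the work ...
		NKern::PreemptionPoint();	// runs IDFCs/reschedules here if any are pending
		}
	NKern::Unlock();
	}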
       
   672 
       
   673 
       
   674 #ifdef __CPU_HAS_VFP
       
   675 // Do the actual VFP context save
       
   676 __NAKED__ void VfpContextSave(void*)
       
   677 	{
       
   678 	VFP_FMRX(,1,VFP_XREG_FPEXC);
       
   679 	asm("tst	r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );	// Check to see if VFP in use
       
   680 	__JUMP(eq,	lr);										// Return immediately if not
       
   681 
       
   682 	VFP_FMRX(,2,VFP_XREG_FPSCR);
       
   683 	asm("stmia	r0!, {r2} ");								// Save FPSCR
       
   684 
       
   685 #ifndef __VFP_V3
       
   686 	VFP_FMRX(,2,VFP_XREG_FPINST);
       
   687 	VFP_FMRX(,3,VFP_XREG_FPINST2);
       
   688 	asm("stmia	r0!, {r2-r3} ");							// Save FPINST, FPINST2
       
   689 #endif
       
   690 
       
   691 	VFP_FSTMIADW(CC_AL,0,0,16);								// Save D0 - D15
       
   692 
       
   693 #ifdef __VFP_V3
       
   694 	VFP_FMRX(,2,VFP_XREG_MVFR0);
       
   695 	asm("tst r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32));	// Check to see if all 32 Advanced SIMD registers are present
       
   696 	asm("beq 0f ");											// Skip ahead if not
       
   697 	GET_CAR(,r2);
       
   698 	asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS));	// Check to see if access to the upper 16 registers is disabled
       
   699 	VFP_FSTMIADW(CC_EQ,0,16,16);							// If not then save D16 - D31
       
   700 #endif
       
   701 
       
   702 	asm("0: ");
       
   703 	asm("bic r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
       
   704 	VFP_FMXR(,VFP_XREG_FPEXC,1);							// Disable VFP
       
   705 
       
   706 	__JUMP(,lr);
       
   707 	}
       
   708 #endif
       
   709 
       
   710 
       
   711 /** Check if the kernel is locked the specified number of times.
       
   712 
       
    713 	@param aCount	The number of times the kernel should be locked.
       
   714 					If zero, tests if it is locked at all
       
   715 	@return TRUE if the tested condition is true.
       
   716 
       
   717 	@internalTechnology
       
   718 */
       
   719 EXPORT_C __NAKED__ TBool NKern::KernelLocked(TInt /*aCount*/)
       
   720 	{
       
   721 	asm("mrs	r12, cpsr ");
       
   722 	__ASM_CLI();
       
   723 	GET_RWNO_TID(,r3);
       
   724 	asm("movs	r1, r0 ");			// r1 = aCount
       
   725 	asm("ldr	r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
       
   726 	asm("moveq	r1, r0 ");			// if aCount=0, aCount=iKernLockCount
       
   727 	asm("cmp	r1, r0 ");			//
       
   728 	asm("movne	r0, #0 ");			// if aCount!=iKernLockCount, return FALSE else return iKernLockCount
       
   729 	asm("msr	cpsr, r12 ");
       
   730 	__JUMP(,lr);
       
   731 	}
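
// Usage sketch (illustrative only; ExampleRequiresKernelLock is hypothetical and
// assumes the standard nanokernel __NK_ASSERT_DEBUG macro): KernelLocked() is
// intended for debug checks such as asserting that the caller holds the kernel lock
// exactly once.
static void ExampleRequiresKernelLock()
	{
	__NK_ASSERT_DEBUG(NKern::KernelLocked(1));
	// ... code that relies on the kernel already being locked once ...
	}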
       
   732 
       
   733 
       
   734 // Only call this if thread migration is disabled, i.e.
       
   735 // interrupts disabled, kernel locked or current thread in 'freeze cpu' mode
       
   736 extern "C" __NAKED__ TSubScheduler& SubScheduler()
       
   737 	{
       
   738 	GET_RWNO_TID(,r0);
       
   739 	__JUMP(,lr);
       
   740 	}
       
   741 
       
   742 /** Returns the NThread control block for the currently scheduled thread.
       
   743 
       
   744     Note that this is the calling thread if called from a thread context, or the
       
   745 	interrupted thread if called from an interrupt context.
       
   746 	
       
   747 	@return A pointer to the NThread for the currently scheduled thread.
       
   748 	
       
   749 	@pre Call in any context.
       
   750 */
       
   751 EXPORT_C __NAKED__ NThread* NKern::CurrentThread()
       
   752 	{
       
   753 	asm("mrs	r12, cpsr ");
       
   754 	__ASM_CLI();
       
   755 	GET_RWNO_TID(,r0);
       
   756 	asm("cmp	r0, #0 ");
       
   757 	asm("ldrne	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
       
   758 	asm("msr	cpsr, r12 ");
       
   759 	__JUMP(,lr);
       
   760 	}
       
   761 
       
   762 
       
   763 /** Returns the NThread control block for the currently scheduled thread.
       
   764 
       
   765     Note that this is the calling thread if called from a thread context, or the
       
   766 	interrupted thread if called from an interrupt context.
       
   767 	
       
   768 	@return A pointer to the NThread for the currently scheduled thread.
       
   769 	
       
   770 	@pre Call with migration disabled - i.e. from an ISR, IDFC, with interrupts
       
   771 			disabled or with preemption disabled.
       
   772 */
       
   773 extern "C" __NAKED__ NThread* NCurrentThreadL()
       
   774 	{
       
   775 	GET_RWNO_TID(,r0);
       
   776 	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
       
   777 	__JUMP(,lr);
       
   778 	}
       
   779 
       
   780 
       
   781 /** Returns the CPU number of the calling CPU.
       
   782 
       
   783 	@return the CPU number of the calling CPU.
       
   784 	
       
   785 	@pre Call in any context.
       
   786 */
       
   787 EXPORT_C __NAKED__ TInt NKern::CurrentCpu()
       
   788 	{
       
   789 	asm("mrs	r12, cpsr ");
       
   790 	__ASM_CLI();
       
   791 	GET_RWNO_TID(,r0);
       
   792 	asm("cmp	r0, #0 ");
       
   793 	asm("ldrne	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));
       
   794 	asm("msr	cpsr, r12 ");
       
   795 	__JUMP(,lr);
       
   796 	}
       
   797 
       
   798 
       
   799 /**	Returns the current processor context type (thread, IDFC or interrupt).
       
   800 
       
   801 	@return	A value from NKern::TContext enumeration (but never EEscaped).
       
   802 	
       
   803 	@pre	Call in any context.
       
   804 
       
   805 	@see	NKern::TContext
       
   806  */
       
   807 EXPORT_C __NAKED__ TInt NKern::CurrentContext()
       
   808 	{
       
   809 	asm("mrs r1, cpsr ");
       
   810 	__ASM_CLI();							// interrupts off to stop migration
       
   811 	GET_RWNO_TID(,r3);						// r3 = &SubScheduler()
       
   812 	asm("mov r0, #2 ");						// 2 = interrupt
       
   813 	asm("and r2, r1, #0x1f ");				// r1 = mode
       
   814 	asm("cmp r2, #0x13 ");
       
   815 	asm("ldreqb r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iInIDFC));
       
   816 	asm("bne 0f ");							// if not svc, must be interrupt
       
   817 	asm("cmp r0, #0 ");
       
   818 	asm("movne r0, #1 ");					// if iInIDFC, return 1 else return 0
       
   819 	asm("0: ");
       
   820 	asm("msr cpsr, r1 ");					// restore interrupts
       
   821 	__JUMP(,lr);
       
   822 	}
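
// Usage sketch (illustrative only; ExampleCanBlock is hypothetical and assumes the
// standard NKern::TContext enumeration): code shared between thread and IDFC/ISR
// callers can use CurrentContext() to decide whether blocking is permitted.
static TBool ExampleCanBlock()
	{
	return NKern::CurrentContext() == NKern::EThread;
	}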
       
   823 
       
   824 
       
   825 extern "C" __NAKED__ void send_irq_ipi(TSubScheduler*)
       
   826 	{
       
   827 	__DATA_SYNC_BARRIER_Z__(r3);			// need DSB before sending any IPI
       
   828 	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
       
   829 	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
       
   830 	asm("mov	r1, #%a0" : : "i" ((TInt)TRANSFERRED_IRQ_VECTOR));
       
   831 	asm("orr	r1, r1, r3, lsl #16 ");
       
   832 	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
       
   833 	__JUMP(,lr);
       
   834 	}
       
   835 
       
   836 // Send any outstanding reschedule IPIs when the kernel is unlocked on this CPU.
       
   837 // Call with interrupts disabled, R0->TSubScheduler, R12=R0->iReschedIPIs
       
   838 // Return with R0 unaltered.
       
   839 extern "C" __NAKED__ void send_accumulated_resched_ipis()
       
   840 	{
       
   841 	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));
       
   842 	asm("mov	r1, #0 ");
       
   843 	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
       
   844 	__DATA_SYNC_BARRIER__(r1);				// need DSB before sending any IPI
       
   845 	asm("mov	r1, r12, lsl #16 ");
       
   846 //	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
       
   847 	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
       
   848 	__JUMP(,lr);
       
   849 	}
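
// Illustrative sketch (not part of the original source): the word written to the GIC
// distributor's software-generated interrupt register by send_irq_ipi() and
// send_accumulated_resched_ipis() above. The CPU target mask goes in bits 23:16 and
// the interrupt ID in bits 3:0; since RESCHED_IPI_VECTOR is 0, the OR of the vector
// is commented out in the assembler.
static inline TUint32 GicSoftIrqWord(TUint32 aCpuMask, TUint32 aVector)
	{
	return (aCpuMask << 16) | (aVector & 0x0fu);
	}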
       
   850 
       
   851 // Send a reschedule IPI to the specified CPU
       
   852 extern "C" __NAKED__ void send_resched_ipi(TInt /*aCpu*/)
       
   853 	{
       
   854 	GET_RWNO_TID(,r3);
       
   855 	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
       
   856 	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
       
   857 	ASM_DEBUG1(SendReschedIPI,r0);
       
   858 	asm("mov	r1, #0x10000 ");
       
   859 	asm("mov	r1, r1, lsl r0 ");	// 0x10000<<aCpu
       
   860 //	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
       
   861 	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
       
   862 	__JUMP(,lr);
       
   863 	}
       
   864 
       
   865 // Send a reschedule IPI to the current processor
       
   866 // *** DON'T DO ANY TRACING OR INSTRUMENTATION ***
       
   867 extern "C" __NAKED__ void send_self_resched_ipi()
       
   868 	{
       
   869 	GET_RWNO_TID(,r3);
       
   870 	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
       
   871 	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
       
   872 	asm("mov	r1, #0x02000000 ");			// target = requesting CPU only
       
   873 //	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
       
   874 	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPI
       
   875 	__JUMP(,lr);
       
   876 	}
       
   877 
       
   878 extern "C" __NAKED__ void send_resched_ipis(TUint32 aMask)
       
   879 	{
       
   880 	ASM_DEBUG1(SendReschedIPIs,r0);
       
   881 	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
       
   882 	asm("cmp	r0, #0 ");		// any bits set in aMask?
       
   883 	GET_RWNO_TID(ne,r3);
       
   884 	asm("ldrne	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
       
   885 	asm("movne	r0, r0, lsl #16 ");
       
   886 //	asm("orrne	r0, r0, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
       
   887 	asm("strne	r0, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs if any
       
   888 	__JUMP(,lr);
       
   889 	}
       
   890 
       
   891 
       
   892 extern "C" __NAKED__ void send_resched_ipi_and_wait(TInt /*aCpu*/)
       
   893 	{
       
   894 	asm("ldr	r1, __TheSubSchedulers ");
       
   895 	asm("mov	r2, #0x10000 ");
       
   896 	asm("mov	r2, r2, lsl r0 ");	// 0x10000<<aCpu
       
   897 	ASM_DEBUG1(SendReschedIPIAndWait,r0);
       
   898 	asm("add	r0, r1, r0, lsl #9 ");	// sizeof(TSubScheduler)=512
       
   899 	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
       
   900 	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
       
   901 	__DATA_SYNC_BARRIER_Z__(r1);		// make sure i_IrqCount is read before IPI is sent
       
   902 //	asm("orr	r2, r2, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
       
   903 	asm("str	r2, [r3, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
       
   904 	__DATA_SYNC_BARRIER__(r1);			// make sure IPI has been sent
       
   905 	asm("1: ");
       
   906 	asm("ldrb	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
       
   907 	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
       
   908 	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
       
   909 	asm("cmp	r1, #0 ");
       
   910 	asm("beq	0f ");					// iRescheduleNeededFlag not set -> wait
       
   911 	asm("cmp	r2, #0 ");
       
   912 	asm("bge	2f ");					// if other CPU is in an ISR, finish
       
   913 	asm("cmp	r3, r12 ");				// if not, has i_IrqCount changed?
       
   914 	asm("0: ");
       
   915 	ARM_WFEcc(CC_EQ);					// if not, wait for something to happen ...
       
   916 	asm("beq	1b ");					// ... and loop
       
   917 	asm("2: ");
       
   918 	__DATA_MEMORY_BARRIER__(r1);		// make sure subsequent memory accesses don't jump the gun
       
   919 										// guaranteed to observe final thread state after this
       
   920 	__JUMP(,lr);
       
   921 
       
   922 	asm("__TheSubSchedulers: ");
       
   923 	asm(".word TheSubSchedulers ");
       
   924 	}
       
   925 
       
   926 /*	If the current thread is subject to timeslicing, update its remaining time
       
   927 	from the current CPU's local timer. Don't stop the timer.
       
   928 	If the remaining time is negative, save it as zero.
       
   929  */
       
   930 __NAKED__ void TSubScheduler::SaveTimesliceTimer(NThreadBase* /*aThread*/)
       
   931 	{
       
   932 	asm("ldr	r3, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
       
   933 	asm("ldrb	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,i_NThread_Initial));
       
   934 	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
       
   935 	asm("cmp	r3, #0 ");
       
   936 	asm("ble	0f ");					// thread isn't timesliced or timeslice already expired so skip
       
   937 	asm("cmp	r12, #0 ");
       
   938 	asm("bne	0f ");					// initial (i.e. idle) thread, so skip
       
   939 	asm("ldr	r3, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
       
   940 	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
       
   941 	asm("cmp	r3, #0 ");
       
   942 	asm("movmi	r0, #0 ");				// if timer count is negative, save zero
       
   943 	asm("bmi	1f ");
       
   944 	asm("umull	r0, r3, r12, r3 ");		// scale up to max timer clock
       
   945 	asm("adds	r0, r0, #0x00800000 ");
       
   946 	asm("adcs	r3, r3, #0 ");
       
   947 	asm("mov	r0, r0, lsr #24 ");
       
   948 	asm("orr	r0, r0, r3, lsl #8 ");
       
   949 	asm("1:		");
       
   950 	asm("str	r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
       
   951 	asm("0:		");
       
   952 	__JUMP(,lr);
       
   953 	}
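
// Illustrative sketch (not part of the original source): the scaling performed above
// in assembler. The remaining local timer count is multiplied by i_TimerMultI
// (32x32->64 bits) to convert it to the maximum timer clock, rounded to nearest by
// adding 2^23 before the 24-bit right shift.
static inline TUint32 ScaleTimerTicks(TUint32 aTimerCount, TUint32 aTimerMultI)
	{
	TUint64 p = (TUint64)aTimerCount * (TUint64)aTimerMultI + 0x00800000u;
	return (TUint32)(p >> 24);
	}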
       
   954 
       
   955 /*	Update aOld's execution time and set up the timer for aNew
       
   956 	Update this CPU's timestamp value
       
   957 
       
   958 	if (!aOld) aOld=iInitialThread
       
   959 	if (!aNew) aNew=iInitialThread
       
   960 	newcount = aNew->iTime>0 ? Max(aNew->iTime*i_TimerMultF/2^32, 1) : 2^31-1
       
   961 	cli()
       
   962 	oldcount = timer count
       
   963 	if (oldcount<=0 || aOld!=aNew)
       
   964 		{
       
   965 		timer count = newcount
       
   966 		elapsed = i_LastTimerSet - oldcount
       
   967 		i_LastTimerSet = newcount
       
   968 		elapsed = elapsed * i_TimerMultI / 2^24
       
   969 		aOld->iTotalCpuTime64 += elapsed
       
   970 		correction = i_TimestampError;
       
   971 		if (correction > i_MaxCorrection)
       
   972 			correction = i_MaxCorrection
       
   973 		else if (correction < -i_MaxCorrection)
       
   974 			correction = -i_MaxCorrection
       
   975 		i_TimestampError -= correction
       
   976 		i_LastTimestamp += elapsed + i_TimerGap - correction
       
   977 		}
       
   978 	sti()
       
   979  */
       
   980 __NAKED__ void TSubScheduler::UpdateThreadTimes(NThreadBase* /*aOld*/, NThreadBase* /*aNew*/)
       
   981 	{
       
   982 	asm("cmp	r2, #0 ");
       
   983 	asm("ldreq	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
       
   984 	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultF));
       
   985 	asm("cmp	r1, #0 ");
       
   986 	asm("ldreq	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
       
   987 	asm("ldr	r3, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
       
   988 	asm("stmfd	sp!, {r4-r7} ");
       
   989 	asm("ldr	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
       
   990 	asm("ldr	r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
       
   991 	asm("cmp	r1, r2 ");
       
   992 	asm("beq	2f ");
       
   993 	asm("adds	r6, r6, #1 ");
       
   994 	asm("str	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
       
   995 	asm("ldr	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
       
   996 	asm("ldr	r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
       
   997 	asm("adcs	r7, r7, #0 ");
       
   998 	asm("str	r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
       
   999 	asm("adds	r4, r4, #1 ");
       
  1000 	asm("adcs	r6, r6, #0 ");
       
  1001 	asm("str	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
       
  1002 	asm("str	r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
       
  1003 	asm("2:		");
       
  1004 	asm("cmp	r3, #1 ");					// aNew->iTime > 0 ?
       
  1005 	asm("umullge r4, r3, r12, r3 ");
       
  1006 	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
       
  1007 	asm("movlt	r3, #0x7fffffff ");
       
  1008 	asm("addges	r3, r3, r4, lsr #31 ");		// round up top 32 bits if bit 31 set
       
  1009 	asm("moveq	r3, #1 ");					// if result zero, limit to 1
       
  1010 	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
       
  1011 	__ASM_CLI();
       
  1012 	asm("ldr	r4, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
       
  1013 	asm("cmp	r1, r2 ");
       
  1014 	asm("bne	1f ");
       
  1015 	asm("cmp	r4, #0 ");
       
  1016 	asm("bgt	0f ");						// same thread, timeslice not expired -> leave timer alone
       
  1017 	asm("1:		");
       
  1018 	asm("str	r3, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));	// set new timeslice value in timer
       
  1019 	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
       
  1020 	asm("str	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
       
  1021 	asm("sub	r12, r12, r4 ");			// r12 = elapsed (actual timer ticks)
       
  1022 	asm("umull	r4, r5, r12, r5 ");
       
  1023 	asm("ldr	r3, [r1, #%a0]!" : : "i" _FOFF(NThreadBase,iTotalCpuTime64));
       
  1024 	asm("ldr	r12, [r1, #4] ");
       
  1025 	asm("adds	r4, r4, #0x00800000 ");
       
  1026 	asm("adcs	r5, r5, #0 ");
       
  1027 	asm("mov	r4, r4, lsr #24 ");
       
  1028 	asm("orr	r4, r4, r5, lsl #8 ");		// r4 = elapsed
       
  1029 	asm("adds	r3, r3, r4 ");
       
  1030 	asm("adcs	r12, r12, #0 ");
       
  1031 	asm("stmia	r1, {r3,r12} ");			// aOld->iTotalCpuTime64 += elapsed
       
  1032 	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
       
  1033 	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_MaxCorrection));
       
  1034 	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
       
  1035 	asm("ldr	r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
       
  1036 	asm("mov	r12, r3 ");
       
  1037 	asm("cmp	r3, r5 ");
       
  1038 	asm("movgt	r3, r5 ");					// if (correction>i_MaxCorrection) correction=i_MaxCorrection
       
  1039 	asm("cmn	r3, r5 ");
       
  1040 	asm("rsblt	r3, r5, #0 ");				// if (correction+i_MaxCorrection<0) correction=-i_MaxCorrection
       
  1041 	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerGap));
       
  1042 	asm("sub	r12, r12, r3 ");
       
  1043 	asm("str	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
       
  1044 	asm("add	r4, r4, r5 ");				// r4 = elapsed + i_TimerGap
       
  1045 	asm("adds	r1, r1, r4 ");
       
  1046 	asm("adcs	r2, r2, #0 ");				// iLastTimestamp64 + (elapsed + i_TimerGap)
       
  1047 	asm("subs	r1, r1, r3 ");
       
  1048 	asm("sbcs	r1, r1, r3, asr #32 ");		// iLastTimestamp64 + (elapsed + i_TimerGap - correction)
       
  1049 	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
       
  1050 	asm("str	r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
       
  1051 	asm("0:		");
       
  1052 	__ASM_STI();
       
  1053 	asm("ldmfd	sp!, {r4-r7} ");
       
  1054 	__JUMP(,lr);
       
  1055 	}
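
// Illustrative sketch (not part of the original source): the new timer count chosen
// by UpdateThreadTimes() above, as described in the pseudocode comment. A timesliced
// thread gets its remaining iTime scaled by i_TimerMultF/2^32 (rounded, and clamped
// to at least 1 so the timer is never programmed with 0); a non-timesliced thread
// gets 2^31-1.
static inline TUint32 NewTimesliceCount(TInt aTime, TUint32 aTimerMultF)
	{
	if (aTime <= 0)
		return 0x7fffffffu;									// not timesliced
	TUint64 p = (TUint64)(TUint32)aTime * (TUint64)aTimerMultF;
	TUint32 n = (TUint32)(p >> 32) + (((TUint32)p) >> 31);	// round using bit 31
	return n ? n : 1;
	}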
       
  1056 
       
  1057