kernel/eka/nkernsmp/arm/ncsched.cia
changeset 43 96e5fb8b040d
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\arm\ncsched.cia
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

// TDfc member data
#define __INCLUDE_TDFC_DEFINES__

#include <e32cia.h>
#include <arm.h>
#include "nkern.h"
#include <arm_gic.h>
#include <arm_scu.h>
#include <arm_tmr.h>
//#include <highrestimer.h>
//#include "emievents.h"

#ifdef _DEBUG
#define ASM_KILL_LINK(rp,rs)	asm("mov "#rs", #0xdf ");\
								asm("orr "#rs", "#rs", "#rs", lsl #8 ");\
								asm("orr "#rs", "#rs", "#rs", lsl #16 ");\
								asm("str "#rs", ["#rp"] ");\
								asm("str "#rs", ["#rp", #4] ");
#else
#define ASM_KILL_LINK(rp,rs)
#endif

#define ALIGN_STACK_START			\
	asm("mov r12, sp");				\
	asm("tst sp, #4");				\
	asm("subeq sp, sp, #4");		\
	asm("str r12, [sp,#-4]!")

#define ALIGN_STACK_END				\
	asm("ldr sp, [sp]")
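// Added commentary: ALIGN_STACK_START adjusts SP so that, once the incoming SP
// value has been pushed, the stack is 8-byte aligned as the ARM EABI requires
// before calling out to C++; ALIGN_STACK_END reloads the saved (possibly
// unaligned) SP from the top of the aligned stack in a single load.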
       


//#define __DEBUG_BAD_ADDR

extern "C" void NewThreadTrace(NThread* a);
extern "C" void send_accumulated_resched_ipis();


__NAKED__ void TScheduler::Reschedule()
	{
	//
	// Enter in mode_svc with kernel locked, interrupts can be on or off
	// Exit in mode_svc with kernel unlocked, interrupts off
	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
	// NOTE: R4-R11 are modified
	//
	asm("mov	r2, sp ");					// bit 0 will be reschedule flag
	asm("bic	sp, sp, #4 ");				// align stack
	GET_RWNO_TID(,r0)						// r0->TSubScheduler
	asm("stmfd	sp!, {r2,lr} ");			// save original SP/resched flag, return address
	__ASM_CLI();							// interrupts off
	asm("ldr	r1, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iDfcPendingFlag)&~3));	// check iDfcPendingFlag and iExIDfcPendingFlag
	asm("mov	r11, r0 ");					// r11->TSubScheduler
	asm("ldr	r10, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));	// r10->CPU local timer

	asm("start_resched: ");
	asm("movs	r1, r1, lsr #16 ");			// check if IDFCs or ExIDFCs pending

	asm("blne "	CSM_ZN13TSubScheduler9QueueDfcsEv);		// queue any pending DFCs
	asm("ldrb	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("ldr	r3, [sp, #0] ");
	asm("mrs	r2, spsr ");				// r2=spsr_svc
	asm("cmp	r1, #0 ");					// check if a reschedule is required
	asm("beq	no_resched_needed ");		// branch out if not
	__ASM_STI();							// interrupts back on
	asm("orr	r3, r3, #1 ");
	asm("str	r3, [sp, #0] ");			// set resched flag
	asm("stmfd	sp!, {r0,r2} ");			// store SPSR_SVC
	asm("ldr	r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
#ifdef __CPU_ARMV7
	asm("mrc	p14, 6, r7, c1, c0, 0 ");	// r7 = TEEHBR
#else
	asm("mov	r7, #0 ");
#endif
	GET_RWRO_TID(,r8);						// r8 = User RO Thread ID
	GET_RWRW_TID(,r9);						// r9 = User RW Thread ID
#ifdef __CPU_HAS_VFP
	VFP_FMRX(,0,VFP_XREG_FPEXC);			// r0 = FPEXC
	asm("bic r0, r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); // Store FPEXC with VFP disabled in case this thread runs on a different core next time
#else
	asm("mov	r0, #0 ");
#endif
	GET_CAR(,	r1);						// r1 = CAR
	asm("mrc	p15, 0, r12, c3, c0, 0 ");	// r12 = DACR
	asm("ldr	r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));

	// Save auxiliary registers
	// R0=FPEXC, R1=CAR, R7=TEEHBR, R8=RWROTID, R9=RWRWTID, R12=DACR
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadReschedStack,iSpare));
	asm("str	sp, [r5, #%a0]" : : "i" _FOFF(NThread,iSavedSP));	// store original thread's stack pointer
	asm("stmia	sp, {r0-r1,r7-r9,r12} ");

	// We must move to a temporary stack before selecting the next thread.
	// This is because another CPU may begin executing this thread before the
	// select_next_thread() function returns and our stack would then be
	// corrupted. We use the stack belonging to this CPU's initial thread since
	// we are guaranteed that will never run on another CPU.
	asm("ldr	sp, [r4, #%a0]" : : "i" _FOFF(NThread,iSavedSP));

	asm("select_thread: ");
	asm("mov	r0, r11 ");
	asm("bl "	CSM_ZN13TSubScheduler16SelectNextThreadEv );	// also sets r0->iCurrentThread
#ifdef BTRACE_CPU_USAGE
	asm("ldr	r2, __BTraceFilter ");
#endif
	asm("movs	r3, r0 ");					// r3 = new thread (might be 0)
	asm("ldrne	sp, [r0, #%a0]" : : "i" _FOFF(NThread,iSavedSP));	// if a thread has been selected, move to its stack
	asm("beq	no_thread ");				// branch out if no thread is ready

#ifdef BTRACE_CPU_USAGE
	asm("ldrb	r1, [r2, #4] ");			// check category 4 trace
	asm("cmp	r1, #0 ");
	asm("beq	1f ");
	asm("stmfd	sp!, {r0-r3} ");
	asm("bl		NewThreadTrace ");
	asm("ldmfd	sp!, {r0-r3} ");
	asm("1: ");
#endif	// BTRACE_CPU_USAGE

	asm("cmp	r3, r5 ");					// same thread?
	asm("beq	same_thread ");
	asm("ldrb	r1, [r3, #%a0]" : : "i" _FOFF(NThreadBase, i_ThrdAttr));
	asm("ldr	r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler, iScheduler));
	asm("mov	r2, r3, lsr #6 ");			// r2=current thread>>6
	asm("tst	r1, #%a0" : : "i" ((TInt)KThreadAttAddressSpace));	// address space required?
	asm("ldrne	r4, [r4, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler));	// if so, get pointer to process handler

	// we are doing a thread switch so restore new thread's auxiliary registers
	// R0=FPEXC, R1=CAR, R7=TEEHBR, R8=RWROTID, R9=RWRWTID, R12=DACR
	asm("ldmia	sp, {r0-r1,r7-r9,r12} ");

#ifdef __CPU_ARMV7
	asm("mcr	p14, 6, r7, c1, c0, 0 ");	// r7 = TEEHBR
#endif
	SET_RWRO_TID(,r8);						// r8 = User RO Thread ID
	SET_RWRW_TID(,r9);						// r9 = User RW Thread ID
#ifdef __CPU_HAS_VFP
	VFP_FMXR(,VFP_XREG_FPEXC,0);			// r0 = FPEXC
#endif
	SET_CAR(,	r1);						// r1 = CAR
	asm("mcr	p15, 0, r12, c3, c0, 0 ");	// r12 = DACR

	asm("beq	no_as_switch ");			// skip if address space change not required

	// Do address space switching
	// Handler called with:
	// r11->subscheduler, r3->current thread
	// r9->new address space, r5->old address space
	// Must preserve r10,r11,r3, can modify other registers
	asm("ldr	r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iAddressSpace));	// get current address space ptr
	asm("ldr	r9, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iAddressSpace));		// get new address space ptr
	asm("adr	lr, as_switch_return ");
	__JUMP(,	r4);

	asm("no_as_switch: ");
	asm("mrc	p15, 0, r4, c13, c0, 1 ");	// r4 = CONTEXTID (threadID:ASID)
	asm("and	r4, r4, #0xff ");			// isolate ASID
	asm("orr	r2, r4, r2, lsl #8 ");		// r2 = new ContextID (new thread ID : ASID)
	__DATA_SYNC_BARRIER_Z__(r12);			// needed before change to ContextID
	asm("mcr	p15, 0, r2, c13, c0, 1 ");	// set ContextID (ASID + debugging thread ID)
	__INST_SYNC_BARRIER__(r12);
#ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
	asm("mcr	p15, 0, r12, c7, c5, 6 ");	// flush BTAC
#endif

	asm("as_switch_return: ");
	asm("same_thread: ");
	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadReschedStack,iSpare));	// step past auxiliary registers
	asm("ldmib	sp!, {r2,r12} ");			// r2=SPSR_SVC, r12=original SP + resched flag
	__ASM_CLI();							// interrupts off
	asm("ldr	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("msr	spsr, r2 ");				// restore spsr_svc
	asm("mov	r0, r11 ");
	asm("mov	r2, r12 ");					// r2 = original SP + reschedule flag
	asm("cmp	r1, #0 ");					// check for more IDFCs and/or another reschedule
	asm("bne	start_resched ");			// loop if required
	asm("ldr	r14, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
	asm("ldr	r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	asm("str	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("cmp	r14, #%a0" : : "i" ((TInt)NThreadBase::ECSDivertPending));
	asm("ldr	lr, [sp, #4] ");			// restore R10, R11, return address
	asm("bic	sp, r2, #3 ");				// restore initial unaligned stack pointer
	asm("and	r2, r2, #1 ");				// r2 = reschedule flag
	asm("beq	resched_thread_divert ");

	// Return with:	R0=&SubScheduler, R1=0, R2=TRUE if reschedule occurred, R3=iCurrentThread
	//				R12=iReschedIPIs
	__JUMP(,	lr);

	asm("no_resched_needed: ");
	asm("ldr	r3, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("ldr	r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	asm("mov	r0, r11 ");
	asm("str	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
	asm("cmp	r2, #%a0" : : "i" ((TInt)NThreadBase::ECSDivertPending));
	asm("ldmfd	sp, {r2,lr} ");				// r2 = original SP + reschedule flag, restore lr
	asm("bic	sp, r2, #3 ");				// restore initial unaligned stack pointer
	asm("and	r2, r2, #1 ");				// r2 = reschedule flag
	asm("beq	resched_thread_divert ");

	// Return with:	R0=&SubScheduler, R1=0, R2=TRUE if reschedule occurred, R3=iCurrentThread
	//				R12=iReschedIPIs
	__JUMP(,	lr);

	asm("resched_thread_divert: ");
	asm("mov	r1, #1 ");
	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("bic	sp, sp, #4 ");				// align stack
	asm("stmfd	sp!, {r0-r5,r12,lr} ");		// save registers for diagnostic purposes
	asm("mov	r4, r3 ");					// don't really need to bother about registers since thread is exiting

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));

	__ASM_STI();
	asm("ldrb	r1, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iFastMutexDefer));
	asm("cmp	r1, #1 ");
	asm("bne	1f ");
	__ASM_CRASH();
	asm("1: ");
	asm("mov	r2, #0 ");
	asm("strb	r2, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iFastMutexDefer));
	asm("mov	r0, r4 ");
	asm("bl "	CSM_ZN11NThreadBase4ExitEv );
	__ASM_CRASH();	// shouldn't get here

	// There is no thread ready to run
	// R11->TSubScheduler, R1=unknown, R2=0, R3=__BTraceFilter, R12=unknown
	asm("no_thread: ");
	__ASM_CLI();
	asm("ldr	r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	asm("mov	r0, r11 ");
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
	__ASM_STI();
	__DATA_SYNC_BARRIER_Z__(r1);
	ARM_WFI;
	asm("no_thread2: ");
	asm("ldr	r1, [r11, #%a0]" : : "i" (_FOFF(TSubScheduler,iDfcPendingFlag)&~3));	// check iDfcPendingFlag and iExIDfcPendingFlag
	asm("mov	r0, r11 ");
	asm("movs	r1, r1, lsr #16 ");
	asm("beq	no_thread ");
	asm("bl "	CSM_ZN13TSubScheduler9QueueDfcsEv);		// queue any pending DFCs
	asm("ldrb	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("cmp	r1, #0 ");					// check if a reschedule is required
	asm("beq	no_thread2 ");
	asm("b		select_thread ");



/******************************************************************************
 Missed out stuff:
	EMI EVENT LOGGING
	__CPU_ARM1136_ERRATUM_351912_FIXED
	Debug hooks in the scheduler
 ******************************************************************************/

	asm("__BTraceFilter: ");
	asm(".word %a0 " : : "i" ((TInt)&BTraceData.iFilter[0]));
	};


/** 
 * Returns the range of linear memory which inserting the scheduler hooks needs to modify.
 * 
 * @param aStart Set to the lowest memory address which needs to be modified.
 * @param aEnd   Set to the highest memory address +1 which needs to be modified.

 @pre	Kernel must be locked.
 @pre	Call in a thread context.
 @pre	Interrupts must be enabled.
 */
EXPORT_C __NAKED__ void NKern::SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd)
	{
#if 0
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
	asm("adr r2,resched_trampoline_hook_address");
	asm("str r2,[r0]");
	asm("adr r2,resched_trampoline_hook_address+4");
	asm("str r2,[r1]");
#else
	asm("mov r2,#0");
	asm("str r2,[r0]");
	asm("str r2,[r1]");
#endif
#endif
	__JUMP(,lr);
	};


/** 
 * Modifies the scheduler code so that it can call the function set by
 * NKern::SetRescheduleCallback().
 *
 * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.

 @pre	Kernel must be locked.
 @pre	Call in a thread context.
 @pre	Interrupts must be enabled.
 */
EXPORT_C __NAKED__ void NKern::InsertSchedulerHooks()
	{
#if 0
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
	asm("adr r0,resched_trampoline_hook_address");
	asm("adr r1,resched_trampoline");
	asm("sub r1, r1, r0");
	asm("sub r1, r1, #8");
	asm("mov r1, r1, asr #2");
	asm("add r1, r1, #0xea000000");  // r1 = a branch instruction from resched_trampoline_hook_address to resched_trampoline

#if defined(__CPU_MEMORY_TYPE_REMAPPING)
	// These platforms have shadow memory in non-writable page. We cannot use the standard
	// Epoc::CopyToShadowMemory interface as we hold Kernel lock here.
	// Instead, we'll temporarily disable access permission checking in MMU by switching
	// domain#0 into Manager Mode (see Domain Access Control Register).
	asm("mrs r12, CPSR ");				// save cpsr setting and ...
	CPSIDAIF;							// ...disable interrupts
	asm("mrc p15, 0, r2, c3, c0, 0 ");	// read DACR
	asm("orr r3, r2, #3");				// domain #0 is the first two bits. manager mode is 11b
	asm("mcr p15, 0, r3, c3, c0, 0 ");	// write DACR
	asm("str r1,[r0]");
	asm("mcr p15, 0, r2, c3, c0, 0 ");	// write back the original value of DACR
	asm("msr CPSR_cxsf, r12 "); 		// restore cpsr setting (re-enable interrupts)
#else
	asm("str r1,[r0]");
#endif

#endif
#endif
	__JUMP(,lr);
	};


/** 
 * Reverts the modification of the Scheduler code performed by NKern::InsertSchedulerHooks()
 *
 * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.

 @pre	Kernel must be locked.
 @pre	Call in a thread context.
 @pre	Interrupts must be enabled.
 */
EXPORT_C __NAKED__ void NKern::RemoveSchedulerHooks()
	{
#if 0
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
	asm("adr r0,resched_trampoline_hook_address");
	asm("ldr r1,resched_trampoline_unhook_data");

#if defined(__CPU_MEMORY_TYPE_REMAPPING)
	// See comments above in InsertSchedulerHooks
	asm("mrs r12, CPSR ");				// save cpsr setting and ...
	CPSIDAIF;							// ...disable interrupts
	asm("mrc p15, 0, r2, c3, c0, 0 ");	// read DACR
	asm("orr r3, r2, #3");				// domain #0 is the first two bits. manager mode is 11b
	asm("mcr p15, 0, r3, c3, c0, 0 ");	// write DACR
	asm("str r1,[r0]");
	asm("mcr p15, 0, r2, c3, c0, 0 ");	// write back the original value of DACR
	asm("msr CPSR_cxsf, r12 "); 		// restore cpsr setting (re-enable interrupts)
#else
	asm("str r1,[r0]");
#endif

#endif
#endif
	__JUMP(,lr);
	};


/** 
 * Set the function which is to be called on every thread reschedule.
 *
 * @param aCallback  Pointer to callback function, or NULL to disable callback.

 @pre	Kernel must be locked.
 @pre	Call in a thread context.
 @pre	Interrupts must be enabled.
 */
EXPORT_C __NAKED__ void NKern::SetRescheduleCallback(TRescheduleCallback /*aCallback*/)
	{
#if 0
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
	asm("ldr r1, __TheScheduler ");
	asm("str r0, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleHook));
#endif
#endif
	__JUMP(,lr);
	};



/** Disables interrupts to specified level.

	Note that if we are not disabling all interrupts we must lock the kernel
	here, otherwise a high priority interrupt which is still enabled could
	cause a reschedule and the new thread could then reenable interrupts.

	@param  aLevel Interrupts are disabled up to and including aLevel.  On ARM,
			level 1 stands for IRQ only and level 2 stands for IRQ and FIQ.
	@return CPU-specific value passed to RestoreInterrupts.

	@pre 1 <= aLevel <= maximum level (CPU-specific)

	@see NKern::RestoreInterrupts()
 */
EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/)
	{
#ifdef __FIQ_IS_UNCONTROLLED__
	asm("mrs	r1, cpsr ");
	asm("cmp	r0, #0 ");
	asm("beq	1f ");
	__ASM_CLI();
	asm("1: ");
	asm("and	r0, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
	__JUMP(,	lr);
#else
	asm("cmp	r0, #1 ");
	asm("bhi "	CSM_ZN5NKern20DisableAllInterruptsEv);	// if level>1, disable all
	asm("mrs	r2, cpsr ");			// r2=original CPSR
	asm("bcc	1f ");					// skip if level=0
	__ASM_CLI();						// Disable all interrupts to prevent migration
	GET_RWNO_TID(,r12);					// r12 -> TSubScheduler
	asm("ldr	r3, [r12, #%a0]!" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("and	r0, r2, #0xc0 ");
	asm("cmp	r3, #0 ");				// test if kernel locked
	asm("addeq	r3, r3, #1 ");			// if not, lock the kernel
	asm("streq	r3, [r12] ");
	asm("orreq	r0, r0, #0x80000000 ");	// and set top bit to indicate kernel locked
	__ASM_STI2();						// reenable FIQs only
	__JUMP(,	lr);
	asm("1: ");
	asm("and	r0, r2, #0xc0 ");
	__JUMP(,	lr);
#endif
	}
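// Illustrative usage sketch (not part of the original file, assumes only the
// NKern API declared in nkern.h): the value returned by DisableInterrupts()
// must be handed back to RestoreInterrupts(), which also unlocks the kernel
// if DisableInterrupts() had to lock it.
#if 0	// example only, not compiled
static void ExampleBriefIrqOffSection()
	{
	TInt irq = NKern::DisableInterrupts(1);		// level 1 = IRQ only on ARM
	// ... touch state shared with IRQ handlers here ...
	NKern::RestoreInterrupts(irq);				// restore previous level
	}
#endif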
       


/** Disables all interrupts (e.g. both IRQ and FIQ on ARM). 

	@return CPU-specific value passed to NKern::RestoreInterrupts().

	@see NKern::RestoreInterrupts()
 */
EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts()
	{
	asm("mrs r1, cpsr ");
	asm("and r0, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
	__ASM_CLI();
	__JUMP(,lr);
	}


/** Enables all interrupts (e.g. IRQ and FIQ on ARM).

	This function never unlocks the kernel.  So it must be used
	only to complement NKern::DisableAllInterrupts. Never use it
	to complement NKern::DisableInterrupts.

	@see NKern::DisableInterrupts()
	@see NKern::DisableAllInterrupts()

	@internalComponent
 */
EXPORT_C __NAKED__ void NKern::EnableAllInterrupts()
	{
	__ASM_STI();
	__JUMP(,lr);
	}
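// Illustrative pairing sketch (not part of the original file): EnableAllInterrupts()
// only ever complements DisableAllInterrupts(); a value obtained from
// DisableInterrupts() must instead go back through RestoreInterrupts().
#if 0	// example only, not compiled
static void ExampleAllIrqsOffSection()
	{
	NKern::DisableAllInterrupts();	// IRQ and FIQ off
	// ... very short critical section ...
	NKern::EnableAllInterrupts();	// IRQ and FIQ back on, kernel lock untouched
	}
#endif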
       


/** Restores interrupts to previous level and unlocks the kernel if it was 
	locked when disabling them.

	@param 	aRestoreData CPU-specific data returned from NKern::DisableInterrupts
			or NKern::DisableAllInterrupts specifying the previous interrupt level.

	@see NKern::DisableInterrupts()
	@see NKern::DisableAllInterrupts()
 */
EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt /*aRestoreData*/)
	{
	asm("tst r0, r0 ");					// test state of top bit of aLevel
	asm("mrs r1, cpsr ");
	asm("and r0, r0, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("bic r1, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("orr r1, r1, r0 ");				// replace I and F bits with those supplied
	asm("msr cpsr_c, r1 ");				// flags are unchanged (in particular N)
	__JUMP(pl,lr);						// if top bit of aLevel clear, finished

	// if top bit of aLevel set, fall through to unlock the kernel
	}


/**	Unlocks the kernel.

	Decrements iKernLockCount for current CPU; if it becomes zero and IDFCs
	or a reschedule are pending, calls the scheduler to process them.
	Must be called in mode_svc.

    @pre    Call either in a thread or an IDFC context.
    @pre    Do not call from an ISR.
 */
EXPORT_C __NAKED__ void NKern::Unlock()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	GET_RWNO_TID(,r0)						// r0=&SubScheduler()
	__ASM_CLI();							// interrupts off
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("subs	r3, r1, #1 ");
	asm("strne	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("bne	0f ");						// kernel still locked -> return
	asm("cmp	r2, #0 ");					// check for DFCs or reschedule
	asm("bne	1f ");
	asm("cmp	r12, #0 ");					// IPIs outstanding?
	asm("str	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));	// unlock the kernel
	asm("bne	2f ");
	asm("0: ");
	__ASM_STI();							// interrupts back on
	__JUMP(,lr);

	// need to run IDFCs and/or reschedule
	asm("1: ");
	asm("stmfd	sp!, {r0,r4-r11,lr} ");
	asm("bl "	CSM_ZN10TScheduler10RescheduleEv );
	asm(".global nkern_unlock_resched_return ");
	asm("nkern_unlock_resched_return: ");

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
	asm("ldmfd	sp!, {r0,r4-r11,lr} ");
	__ASM_STI();
	__JUMP(,lr);

	asm("2:		");
	asm("stmfd	sp!, {r0,lr} ");
	asm("bl "	CSM_CFUNC(send_accumulated_resched_ipis));
	asm("ldmfd	sp!, {r0,lr} ");
	__ASM_STI();
	__JUMP(,lr);
	}


/**	Locks the kernel.

	Increments iKernLockCount for the current CPU, thereby deferring IDFCs
	and preemption. Must be called in mode_svc.

    @pre    Call either in a thread or an IDFC context.
    @pre    Do not call from an ISR.
 */
EXPORT_C __NAKED__ void NKern::Lock()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	__ASM_CLI();
	GET_RWNO_TID(,r12);
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("add r3, r3, #1 ");			// lock the kernel
	asm("str r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	__ASM_STI();
	__JUMP(,lr);
	}
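// Illustrative usage sketch (not part of the original file): every NKern::Lock()
// must be balanced by an NKern::Unlock(); the final Unlock() is the point at which
// any IDFCs or a pending reschedule deferred by the lock are processed.
#if 0	// example only, not compiled
static void ExamplePreemptionOffSection()
	{
	NKern::Lock();			// defer IDFCs and preemption on this CPU
	// ... manipulate nanokernel state that must not be preempted ...
	NKern::Unlock();		// runs pending IDFCs/reschedule if the count hits zero
	}
#endif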
       


/**	Locks the kernel and returns a pointer to the current thread.

	Increments iKernLockCount for the current CPU, thereby deferring IDFCs
	and preemption. Must be called in mode_svc.

    @pre    Call either in a thread or an IDFC context.
    @pre    Do not call from an ISR.
 */
EXPORT_C __NAKED__ NThread* NKern::LockC()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	__ASM_CLI();
	GET_RWNO_TID(,r12);
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("ldr r0, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("add r3, r3, #1 ");			// lock the kernel
	asm("str r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	__ASM_STI();
	__JUMP(,lr);
	}


/**	Allows IDFCs and rescheduling if they are pending.

	If IDFCs or a reschedule are pending and iKernLockCount is exactly equal to 1,
	calls the scheduler to process the IDFCs and possibly reschedule.
	Must be called in mode_svc.

	@return	Nonzero if a reschedule actually occurred, zero if not.

    @pre    Call either in a thread or an IDFC context.
    @pre    Do not call from an ISR.
 */
EXPORT_C __NAKED__ TInt NKern::PreemptionPoint()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	GET_RWNO_TID(,r0)						// r0=&SubScheduler()
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	asm("cmp	r1, #1 ");
	asm("bgt	0f ");						// if locked more than once return FALSE
	asm("cmp	r2, #0 ");					// locked once and IDFCs/reschedule pending?
	asm("bne	1f ");						// skip if so
	asm("cmp	r12, #0 ");					// locked once and resched IPIs outstanding?
	asm("bne	2f ");						// skip if so
	asm("0:		");
	asm("mov	r0, #0 ");
	__JUMP(,	lr);						// else return FALSE

	// need to run IDFCs and/or reschedule
	asm("1:		");
	asm("stmfd	sp!, {r1,r4-r11,lr} ");
	asm("bl "	CSM_ZN10TScheduler10RescheduleEv );
	asm(".global nkern_preemption_point_resched_return ");
	asm("nkern_preemption_point_resched_return: ");
	asm("str	r2, [sp] ");
	asm("mov	r2, #1 ");
	asm("str	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
	asm("ldmfd	sp!, {r0,r4-r11,lr} ");		// return TRUE if reschedule occurred
	__ASM_STI();
	__JUMP(,	lr);

	asm("2:		");
	asm("stmfd	sp!, {r2,lr} ");
	asm("bl "	CSM_CFUNC(send_accumulated_resched_ipis));
	asm("ldmfd	sp!, {r0,lr} ");			// return TRUE if reschedule occurred
	__ASM_STI();
	__JUMP(,	lr);
	}
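// Illustrative usage sketch (not part of the original file): a long-running
// operation that holds the kernel lock can offer preemption periodically.
#if 0	// example only, not compiled
static void ExampleLongKernelLockedLoop(TInt aCount)
	{
	NKern::Lock();
	for (TInt i=0; i<aCount; ++i)
		{
		// ... do one bounded chunk of work with the kernel locked ...
		NKern::PreemptionPoint();	// run pending IDFCs / reschedule if needed
		}
	NKern::Unlock();
	}
#endif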
       


#ifdef __CPU_HAS_VFP
// Do the actual VFP context save
__NAKED__ void VfpContextSave(void*)
	{
	VFP_FMRX(,1,VFP_XREG_FPEXC);
	asm("tst r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );		// Check to see if VFP in use
	__JUMP(eq, lr);											// Return immediately if not
	asm("tst r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EX) );		// Check to see if an exception has occurred
	asm("beq 1f ");											// Skip ahead if not
	asm("bic r1, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EX));
	VFP_FMXR(,VFP_XREG_FPEXC,1);							// Reset exception flag
	asm("orr r1, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EX));	// But store it for later
	asm("1: ");


	VFP_FMRX(,2,VFP_XREG_FPSCR);
	asm("stmia	r0!, {r2} ");								// Save FPSCR

#ifndef __VFP_V3
	VFP_FMRX(,2,VFP_XREG_FPINST);
	VFP_FMRX(,3,VFP_XREG_FPINST2);
	asm("stmia	r0!, {r2-r3} ");							// Save FPINST, FPINST2
#endif

	VFP_FSTMIADW(CC_AL,0,0,16);								// Save D0 - D15

#ifdef __VFP_V3
	VFP_FMRX(,2,VFP_XREG_MVFR0);
	asm("tst r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32));	// Check to see if all 32 Advanced SIMD registers are present
	asm("beq 0f ");											// Skip ahead if not
	GET_CAR(,r2);
	asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS));	// Check to see if access to the upper 16 registers is disabled
	VFP_FSTMIADW(CC_EQ,0,16,16);							// If not then save D16 - D31
#endif

	asm("0: ");
	asm("bic r1, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
	VFP_FMXR(,VFP_XREG_FPEXC,1);							// Disable VFP

	__JUMP(,lr);
	}
#endif


/** Check if the kernel is locked the specified number of times.

	@param aCount	The number of times the kernel should be locked
					If zero, tests if it is locked at all
	@return TRUE if the tested condition is true.

	@internalTechnology
*/
EXPORT_C __NAKED__ TBool NKern::KernelLocked(TInt /*aCount*/)
	{
	asm("mrs	r12, cpsr ");
	__ASM_CLI();
	GET_RWNO_TID(,r3);
	asm("movs	r1, r0 ");			// r1 = aCount
	asm("ldr	r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("moveq	r1, r0 ");			// if aCount=0, aCount=iKernLockCount
	asm("cmp	r1, r0 ");			//
	asm("movne	r0, #0 ");			// if aCount!=iKernLockCount, return FALSE else return iKernLockCount
	asm("msr	cpsr, r12 ");
	__JUMP(,lr);
	}
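// Illustrative usage sketch (not part of the original file, assuming the usual
// nanokernel __NK_ASSERT_DEBUG macro is available): KernelLocked() is intended
// for debug checks rather than flow control.
#if 0	// example only, not compiled
	__NK_ASSERT_DEBUG(NKern::KernelLocked(1));	// exactly one Lock() outstanding
#endif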
       


// Only call this if thread migration is disabled, i.e.
// interrupts disabled, kernel locked or current thread in 'freeze cpu' mode
extern "C" __NAKED__ TSubScheduler& SubScheduler()
	{
	GET_RWNO_TID(,r0);
	__JUMP(,lr);
	}

/** Returns the NThread control block for the currently scheduled thread.

    Note that this is the calling thread if called from a thread context, or the
	interrupted thread if called from an interrupt context.
	
	@return A pointer to the NThread for the currently scheduled thread.
	
	@pre Call in any context.
*/
EXPORT_C __NAKED__ NThread* NKern::CurrentThread()
	{
	asm("mrs	r12, cpsr ");
	__ASM_CLI();
	GET_RWNO_TID(,r0);
	asm("cmp	r0, #0 ");
	asm("ldrne	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("msr	cpsr, r12 ");
	__JUMP(,lr);
	}


/** Returns the NThread control block for the currently scheduled thread.

    Note that this is the calling thread if called from a thread context, or the
	interrupted thread if called from an interrupt context.
	
	@return A pointer to the NThread for the currently scheduled thread.
	
	@pre Call with migration disabled - i.e. from an ISR, IDFC, with interrupts
			disabled or with preemption disabled.
*/
extern "C" __NAKED__ NThread* NCurrentThreadL()
	{
	GET_RWNO_TID(,r0);
	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	__JUMP(,lr);
	}


/** Returns the CPU number of the calling CPU.

	@return the CPU number of the calling CPU.
	
	@pre Call in any context.
*/
EXPORT_C __NAKED__ TInt NKern::CurrentCpu()
	{
	asm("mrs	r12, cpsr ");
	__ASM_CLI();
	GET_RWNO_TID(,r0);
	asm("cmp	r0, #0 ");
	asm("ldrne	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));
	asm("msr	cpsr, r12 ");
	__JUMP(,lr);
	}


/**	Returns the current processor context type (thread, IDFC or interrupt).

	@return	A value from NKern::TContext enumeration (but never EEscaped).
	
	@pre	Call in any context.

	@see	NKern::TContext
 */
EXPORT_C __NAKED__ TInt NKern::CurrentContext()
	{
	asm("mrs r1, cpsr ");
	__ASM_CLI();							// interrupts off to stop migration
	GET_RWNO_TID(,r3);						// r3 = &SubScheduler()
	asm("mov r0, #2 ");						// 2 = interrupt
	asm("and r2, r1, #0x1f ");				// r2 = mode
	asm("cmp r2, #0x13 ");
	asm("ldreqb r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iInIDFC));
	asm("bne 0f ");							// if not svc, must be interrupt
	asm("cmp r0, #0 ");
	asm("movne r0, #1 ");					// if iInIDFC, return 1 else return 0
	asm("0: ");
	asm("msr cpsr, r1 ");					// restore interrupts
	__JUMP(,lr);
	}
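// Illustrative usage sketch (not part of the original file): the return value
// maps onto NKern::TContext, so callers normally compare against the enum
// rather than the raw numbers used above.
#if 0	// example only, not compiled
static TBool ExampleCanBlock()
	{
	return NKern::CurrentContext() == NKern::EThread;	// only thread context may block
	}
#endif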
       


extern "C" __NAKED__ void send_irq_ipi(TSubScheduler*)
	{
	__DATA_SYNC_BARRIER_Z__(r3);			// need DSB before sending any IPI
	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	asm("mov	r1, #%a0" : : "i" ((TInt)TRANSFERRED_IRQ_VECTOR));
	asm("orr	r1, r1, r3, lsl #16 ");
	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
	__JUMP(,lr);
	}
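// Added commentary on the writes to GicDistributor::iSoftIrq in the IPI send
// routines here (based on the ARM GIC v1/v2 GICD_SGIR layout, not from the
// original file): bits [23:16] carry the CPU target list, bits [25:24] select
// the target filter (0b00 = use the list, 0b10 = requesting CPU only, hence the
// 0x02000000 used by send_self_resched_ipi), and bits [3:0] carry the software
// interrupt ID (RESCHED_IPI_VECTOR is 0, so no OR is needed for it).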
       

// Send any outstanding reschedule IPIs when the kernel is unlocked on this CPU.
// Call with interrupts disabled, R0->TSubScheduler, R12=R0->iReschedIPIs
// Return with R0 unaltered.
extern "C" __NAKED__ void send_accumulated_resched_ipis()
	{
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));
	asm("mov	r1, #0 ");
	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	__DATA_SYNC_BARRIER__(r1);				// need DSB before sending any IPI
	asm("mov	r1, r12, lsl #16 ");
//	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
	__JUMP(,lr);
	}

// Send a reschedule IPI to the specified CPU
extern "C" __NAKED__ void send_resched_ipi(TInt /*aCpu*/)
	{
	GET_RWNO_TID(,r3);
	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	ASM_DEBUG1(SendReschedIPI,r0);
	asm("mov	r1, #0x10000 ");
	asm("mov	r1, r1, lsl r0 ");	// 0x10000<<aCpu
//	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
	__JUMP(,lr);
	}

// Send a reschedule IPI to the current processor
// *** DON'T DO ANY TRACING OR INSTRUMENTATION ***
extern "C" __NAKED__ void send_self_resched_ipi()
	{
	GET_RWNO_TID(,r3);
	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	asm("mov	r1, #0x02000000 ");			// target = requesting CPU only
//	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPI
	__JUMP(,lr);
	}

extern "C" __NAKED__ void send_resched_ipis(TUint32 aMask)
	{
	ASM_DEBUG1(SendReschedIPIs,r0);
	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
	asm("cmp	r0, #0 ");		// any bits set in aMask?
	GET_RWNO_TID(ne,r3);
	asm("ldrne	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	asm("movne	r0, r0, lsl #16 ");
//	asm("orrne	r0, r0, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
	asm("strne	r0, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs if any
	__JUMP(,lr);
	}


extern "C" __NAKED__ void send_resched_ipi_and_wait(TInt /*aCpu*/)
	{
	asm("ldr	r1, __TheSubSchedulers ");
	asm("mov	r2, #0x10000 ");
	asm("mov	r2, r2, lsl r0 ");	// 0x10000<<aCpu
	ASM_DEBUG1(SendReschedIPIAndWait,r0);
	asm("add	r0, r1, r0, lsl #9 ");	// sizeof(TSubScheduler)=512
	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
	__DATA_SYNC_BARRIER_Z__(r1);		// make sure i_IrqCount is read before IPI is sent
//	asm("orr	r2, r2, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
	asm("str	r2, [r3, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
	__DATA_SYNC_BARRIER__(r1);			// make sure IPI has been sent
	asm("1: ");
	asm("ldrb	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
	asm("cmp	r1, #0 ");
	asm("beq	0f ");					// iRescheduleNeededFlag not set -> wait
	asm("cmp	r2, #0 ");
	asm("bge	2f ");					// if other CPU is in an ISR, finish
	asm("cmp	r3, r12 ");				// if not, has i_IrqCount changed?
	asm("0: ");
	ARM_WFEcc(CC_EQ);					// if not, wait for something to happen ...
	asm("beq	1b ");					// ... and loop
	asm("2: ");
	__DATA_MEMORY_BARRIER__(r1);		// make sure subsequent memory accesses don't jump the gun
										// guaranteed to observe final thread state after this
	__JUMP(,lr);

	asm("__TheSubSchedulers: ");
	asm(".word TheSubSchedulers ");
	}

/*	If the current thread is subject to timeslicing, update its remaining time
	from the current CPU's local timer. Don't stop the timer.
	If the remaining time is negative, save it as zero.
 */
__NAKED__ void TSubScheduler::SaveTimesliceTimer(NThreadBase* /*aThread*/)
	{
	asm("ldr	r3, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
	asm("ldrb	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,i_NThread_Initial));
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
	asm("cmp	r3, #0 ");
	asm("ble	0f ");					// thread isn't timesliced or timeslice already expired so skip
	asm("cmp	r12, #0 ");
	asm("bne	0f ");					// initial (i.e. idle) thread, so skip
	asm("ldr	r3, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
	asm("cmp	r3, #0 ");
	asm("movmi	r0, #0 ");				// if timer count is negative, save zero
	asm("bmi	1f ");
	asm("umull	r0, r3, r12, r3 ");		// scale up to max timer clock
	asm("adds	r0, r0, #0x00800000 ");
	asm("adcs	r3, r3, #0 ");
	asm("mov	r0, r0, lsr #24 ");
	asm("orr	r0, r0, r3, lsl #8 ");
	asm("1:		");
	asm("str	r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
	asm("0:		");
	__JUMP(,lr);
	}
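// Added commentary on the scaling above (an interpretation, not from the
// original file): i_TimerMultI appears to be a fixed-point factor with 24
// fractional bits converting local-timer ticks to the maximum timer clock.
// The UMULL forms the 64-bit product, 0x00800000 (half of 2^24) is added to
// round to nearest, and bits [55:24] are reassembled into r0, i.e.
//   iTime = (iTimerCount * i_TimerMultI + 2^23) >> 24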
       

/*	Update aOld's execution time and set up the timer for aNew
	Update this CPU's timestamp value

	if (!aOld) aOld=iInitialThread
	if (!aNew) aNew=iInitialThread
	newcount = aNew->iTime>0 ? Max(aNew->iTime*i_TimerMultF/2^32, 1) : 2^31-1
	cli()
	oldcount = timer count
	if (oldcount<=0 || aOld!=aNew)
		{
		timer count = newcount
		elapsed = i_LastTimerSet - oldcount
		i_LastTimerSet = newcount
		elapsed = elapsed * i_TimerMultI / 2^24
		aOld->iTotalCpuTime64 += elapsed
		correction = i_TimestampError;
		if (correction > i_MaxCorrection)
			correction = i_MaxCorrection
		else if (correction < -i_MaxCorrection)
			correction = -i_MaxCorrection
		i_TimestampError -= correction
		i_LastTimestamp += elapsed + i_TimerGap - correction
		}
	sti()
 */
__NAKED__ void TSubScheduler::UpdateThreadTimes(NThreadBase* /*aOld*/, NThreadBase* /*aNew*/)
	{
	asm("cmp	r2, #0 ");
	asm("ldreq	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultF));
	asm("cmp	r1, #0 ");
	asm("ldreq	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
	asm("ldr	r3, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
	asm("stmfd	sp!, {r4-r7} ");
	asm("ldr	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
	asm("ldr	r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
	asm("cmp	r1, r2 ");
	asm("beq	2f ");
	asm("adds	r6, r6, #1 ");
	asm("str	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
	asm("ldr	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
	asm("ldr	r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
	asm("adcs	r7, r7, #0 ");
	asm("str	r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
	asm("adds	r4, r4, #1 ");
	asm("adcs	r6, r6, #0 ");
	asm("str	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
	asm("str	r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
	asm("2:		");
	asm("cmp	r3, #1 ");					// aNew->iTime > 0 ?
	asm("umullge r4, r3, r12, r3 ");
	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
	asm("movlt	r3, #0x7fffffff ");
	asm("addges	r3, r3, r4, lsr #31 ");		// round up top 32 bits if bit 31 set
	asm("moveq	r3, #1 ");					// if result zero, limit to 1
	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
	__ASM_CLI();
	asm("ldr	r4, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
	asm("cmp	r1, r2 ");
	asm("bne	1f ");
	asm("cmp	r4, #0 ");
	asm("bgt	0f ");						// same thread, timeslice not expired -> leave timer alone
	asm("1:		");
	asm("str	r3, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));	// set new timeslice value in timer
	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
	asm("str	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
	asm("sub	r12, r12, r4 ");			// r12 = elapsed (actual timer ticks)
	asm("umull	r4, r5, r12, r5 ");
	asm("ldr	r3, [r1, #%a0]!" : : "i" _FOFF(NThreadBase,iTotalCpuTime64));
	asm("ldr	r12, [r1, #4] ");
	asm("adds	r4, r4, #0x00800000 ");
	asm("adcs	r5, r5, #0 ");
	asm("mov	r4, r4, lsr #24 ");
	asm("orr	r4, r4, r5, lsl #8 ");		// r4 = elapsed
	asm("adds	r3, r3, r4 ");
	asm("adcs	r12, r12, #0 ");
	asm("stmia	r1, {r3,r12} ");			// aOld->iTotalCpuTime64 += elapsed
	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_MaxCorrection));
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
	asm("ldr	r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
	asm("mov	r12, r3 ");
	asm("cmp	r3, r5 ");
	asm("movgt	r3, r5 ");					// if (correction>i_MaxCorrection) correction=i_MaxCorrection
	asm("cmn	r3, r5 ");
	asm("rsblt	r3, r5, #0 ");				// if (correction+i_MaxCorrection<0) correction=-i_MaxCorrection
	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerGap));
	asm("sub	r12, r12, r3 ");
	asm("str	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
	asm("add	r4, r4, r5 ");				// r4 = elapsed + i_TimerGap
	asm("adds	r1, r1, r4 ");
	asm("adcs	r2, r2, #0 ");				// iLastTimestamp64 + (elapsed + i_TimerGap)
	asm("subs	r1, r1, r3 ");
	asm("sbcs	r1, r1, r3, asr #32 ");		// iLastTimestamp64 + (elapsed + i_TimerGap - correction)
	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
	asm("str	r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
	asm("0:		");
	__ASM_STI();
	asm("ldmfd	sp!, {r4-r7} ");
	__JUMP(,lr);
	}
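// Added commentary on the newcount calculation above (an interpretation, not
// from the original file): i_TimerMultF is used as a fraction with 32
// fractional bits, so the UMULL keeps the top 32 bits of
// aNew->iTime * i_TimerMultF, rounds on bit 31 of the low word, and clamps the
// result to a minimum of 1, matching the pseudocode line
//   newcount = aNew->iTime>0 ? Max(aNew->iTime*i_TimerMultF/2^32, 1) : 2^31-1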
       