kernel/eka/nkernsmp/arm/vectors.cia
changeset 31:56f325a607ea
parent   0:a41df078684a
child   90:947f0dc9f7a8
comparison 15:4122176ea935 vs 31:56f325a607ea
 
 #ifdef BTRACE_CPU_USAGE
 extern "C" void btrace_irq_exit();
 extern "C" void btrace_fiq_exit();
 #endif
-
 #ifdef _DEBUG
 #define __CHECK_LOCK_STATE__
 #endif
 
 //#define __FAULT_ON_FIQ__
...
 	}
 #endif
 
 //#define	__RECORD_STATE__
 #ifdef __RECORD_STATE__
-#define RECORD_STATE				\
+#define RECORD_STATE()				\
 	asm("ldr r3, [sp, #68] ");		\
 	asm("mov r1, sp ");				\
 	asm("bic r12, sp, #0xff ");		\
 	asm("bic r12, r12, #0xf00 ");	\
 	asm("add r12, r12, #24 ");		\
...
 	asm("ldmia r1!, {r2-r11} ");	\
 	asm("stmia r12!, {r2-r11} ");	\
 	asm("ldmia r1!, {r2-r9} ");		\
 	asm("stmia r12!, {r2-r9} ")
 
-#define RECORD_STATE_EXC			\
+#define RECORD_STATE_EXC()			\
 	asm("ldr r3, [sp, #92] ");		\
 	asm("mov r12, sp ");			\
 	asm("bic lr, sp, #0xff ");		\
 	asm("bic lr, lr, #0xf00 ");		\
 	asm("tst r3, #0x0f ");			\
...
 	asm("ldmia r12!, {r0-r11} ");	\
 	asm("stmia lr!, {r0-r11} ");	\
 	asm("ldmia r12!, {r0-r11} ");	\
 	asm("stmia lr!, {r0-r11} ");
 #else
-#define RECORD_STATE
-#define RECORD_STATE_EXC
+#define RECORD_STATE()
+#define RECORD_STATE_EXC()
+#endif
+
+#ifdef __USER_MEMORY_GUARDS_ENABLED__
+// This macro can be invoked just before a return-from-exception instruction
+// It will cause an UNDEF exception if we're about to return to user mode with UMG still on
+#define	USER_MEMORY_GUARD_CHECK()											\
+	asm("stmfd	sp!, {lr}");												\
+	asm("ldr	lr, [sp, #8]");						/* lr<-future CPSR	*/	\
+	USER_MEMORY_GUARD_ASSERT_OFF_IF_MODE_USR(lr);							\
+	asm("ldmfd	sp!, {lr}");
+#else
+#define	USER_MEMORY_GUARD_CHECK()
 #endif
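A rough C++ sketch of what the new USER_MEMORY_GUARD_CHECK() does at each exit path. The [sp, #8] offset and the mode test come from the macro above; the type and helper names here are illustrative stand-ins, not the real kernel declarations:

	// Sketch only: just before RFEIAW(13) the stack holds { return PC, return CPSR },
	// so after "stmfd sp!, {lr}" the future CPSR sits at [sp, #8].
	typedef unsigned long TUint32;
	void AssertUserMemoryGuardOff();					// stand-in for USER_MEMORY_GUARD_ASSERT_OFF_IF_MODE_USR

	void UserMemoryGuardCheckSketch(const TUint32* sp)	// sp as it is after pushing lr
		{
		TUint32 futureCpsr = sp[2];						// "ldr lr, [sp, #8]"  /* lr<-future CPSR */
		if ((futureCpsr & 0x0f) == 0)					// low mode bits 0 => returning to mode_usr
			AssertUserMemoryGuardOff();					// the real macro raises an UNDEF if the guard is still on
		}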
 
 /******************************************************************************
  * SWI Handler
  ******************************************************************************/
...
 
 	asm(".global exec_wfar_finish ");
 	asm("exec_wfar_finish: ");
 	asm("mrs	r1, spsr ");
 	asm("tst	r1, #0x0f ");
-	asm("bne	fast_swi_exit2 ");		// not returning to user mode
+	asm("bne	fast_swi_exit2 ");			// not returning to user mode; in this case we don't run callbacks
+											// and the UMG was not changed on entry so we don't reset it
+
 #ifdef __CHECK_LOCK_STATE__
 	asm("bl "	CSM_CFUNC(check_lock_state));
 #endif
-	asm("cmp	r4, #3 ");				// callbacks?
-	asm("blhs	run_user_mode_callbacks ");
-	USER_MEMORY_GUARD_OFF(,r12,r12);
+	asm("cmp	r4, #3 ");					// callbacks?
+	asm("blhs	run_user_mode_callbacks ");	// run them; NB trashes most registers (R0-R12, R14)
+	USER_MEMORY_GUARD_OFF(,r12,r12);		// because we're returning to user mode
 	asm("b		fast_swi_exit2 ");
 #endif
 
 	asm("fast_swi_exit: ");
 #if defined(__CHECK_LOCK_STATE__) || defined(__USER_MEMORY_GUARDS_ENABLED__)
 	asm("mrs	r12, spsr ");
 	asm("tst	r12, #0x0f ");
-	asm("bne	1f ");
+	asm("bne	fast_swi_exit2 ");			// not returning to user mode; in this case we don't run callbacks
+											// and the UMG was not changed on entry so we don't restore it
 #ifdef __CHECK_LOCK_STATE__
 	asm("bl "	CSM_CFUNC(check_lock_state));
 #endif
-	USER_MEMORY_GUARD_OFF(,r12,r12);
-	asm("1: ");
-#endif
+	USER_MEMORY_GUARD_OFF(,r12,r12);		// because we're returning to user mode
+#endif
+
 	asm("fast_swi_exit2: ");
-	RECORD_STATE;
+	RECORD_STATE();
 	asm("ldmib	sp, {r1-r14}^ ");			// restore R1-R12, R13_usr, R14_usr
 	asm("nop ");							// don't access banked register immediately after
 	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
+	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
 	RFEIAW(13);								// restore PC and CPSR - return from Exec function
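The fast and slow exit paths share the same gate before running user-mode callbacks. Read as C it amounts to the following sketch; treating values below 3 as reserved sentinels is inferred from the "cmp r4, #3" / "blhs" pair above, not stated in the source:

	typedef unsigned long TLinAddr;
	extern "C" void run_user_mode_callbacks();			// defined later in this file

	// Sketch only: R4 holds NThreadBase::iUserModeCallbacks, loaded before the exit sequence.
	void MaybeRunCallbacksSketch(TLinAddr listHead, bool returningToUserMode)
		{
		if (returningToUserMode && listHead >= 3)		// "cmp r4, #3" / "blhs run_user_mode_callbacks"
			run_user_mode_callbacks();					// trashes R0-R12, R14 (per the comments above)
		}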
 
 
 	asm("slow_swi: ");						// IRQs and FIQs off here
 	__ASM_STI();							// all interrupts on
 	asm("ldr	r4, [r9, #%a0]" : : "i" _FOFF(NThread,iSlowExecTable));
 	asm("mrs	r11, spsr ");				// spsr_svc into r11
 	asm("adr	lr, slow_swi_exit ");
 	asm("ldr	r5, [r4, #-12] ");			// r5=limit
 	asm("add	r6, r4, r12, lsr #6 ");		// r6->dispatch table entry
 	asm("cmp	r5, r12, lsr #9 ");			// r5-SWI number
 	asm("ldmhiia r6, {r5,r6} ");			// if SWI number OK, flags into r5, function addr into r6
-	asm("ldrls	pc, [r4, #-8] ");			// if SWI number invalid, call invalid handler
+	asm("ldrls	pc, [r4, #-8] ");			// if SWI number invalid, call invalid handler, returning to slow_swi_exit below
 
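A C++ sketch of the dispatch-table lookup above. The -12/-8 offsets, the 8-byte entry stride ("lsr #6") and the ">> 9" shift are taken from the code; the struct layout and names are illustrative, not the real NKern declarations:

	#include <cstdint>

	struct SSlowExecEntrySketch { std::uint32_t iFlags; std::uintptr_t iFunction; };	// 8 bytes per entry

	// table = NThread::iSlowExecTable (R4); r12 carries the SWI number << 9 (per the shifts above).
	bool LookupSlowExecSketch(std::uintptr_t table, std::uint32_t r12,
							  std::uint32_t& flags, std::uintptr_t& handler)
		{
		std::uint32_t swiNo = r12 >> 9;
		std::uint32_t limit = *reinterpret_cast<const std::uint32_t*>(table - 12);	// "ldr r5, [r4, #-12]"
		if (swiNo >= limit)
			{
			handler = *reinterpret_cast<const std::uintptr_t*>(table - 8);			// "ldrls pc, [r4, #-8]"
			return false;															// invalid SWI number
			}
		const SSlowExecEntrySketch* e =
			reinterpret_cast<const SSlowExecEntrySketch*>(table + 8*swiNo);			// "add r6, r4, r12, lsr #6"
		flags = e->iFlags;															// "ldmhiia r6, {r5,r6}"
		handler = e->iFunction;
		return true;
		}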
 	// Acquire system lock if necessary: warning - any scratch registers modified after __ArmVectorSwi()
 	// function preamble will be restored after call to NKern::LockSystem() with stale values.
 	asm("tst	r5, #%a0" : : "i" ((TInt)KExecFlagClaim));	// claim system lock?
 	asm("beq	slow_swi_no_wait ");						// skip if not
 	asm("bl "	CSM_ZN5NKern10LockSystemEv );
-	asm("ldmia	sp, {r0-r3} ");
+	asm("ldmia	sp, {r0-r3} ");								// reload original values
 	asm("slow_swi_no_wait: ");
 
-	// Check to see if extra arguments are needed.  Needs to be placed here because NKern::LockSystem()
-	// will not preserve value of r2 and ldmia call will replace it with a stale copy.
+	// Check to see if extra arguments are needed.  Needs to be placed after call to NKern::LockSystem()
+	// above, as r2 is reloaded with its original value by the ldmia instruction above
 	asm("tst	r5, #%a0" : : "i" ((TInt)KExecFlagExtraArgMask));	// extra arguments needed?
 	asm("addne	r2, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR4));	// if so, point r2 at saved registers on stack
 
 	asm("tst	r5, #%a0" : : "i" ((TInt)KExecFlagPreprocess));	// preprocess (handle lookup)? can use r4, r7, r8, r12, r0
 	asm("mov	lr, pc ");
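Continuing the sketch, the flag tests above drive the preamble that runs before the handler itself. Only the flag identifiers are taken from the code; the numeric values, frame layout and helpers below are placeholders:

	#include <cstdint>

	enum TExecFlagSketch	// placeholder values; only the names match the code above
		{
		KExecFlagClaim = 1, KExecFlagExtraArgMask = 2, KExecFlagPreprocess = 4
		};

	void LockSystemSketch();							// stands in for NKern::LockSystem()
	void PreprocessSketch(std::uint32_t* savedRegs);	// stands in for the preprocess (handle lookup) call

	void SlowExecPreambleSketch(std::uint32_t flags, std::uint32_t* savedFrame, std::uint32_t*& extraArgs)
		{
		if (flags & KExecFlagClaim)						// handler must run with the system lock held
			{
			LockSystemSketch();							// scratch registers may be clobbered here...
			// ...which is why r0-r3 are reloaded from the saved frame ("ldmia sp, {r0-r3}")
			}
		if (flags & KExecFlagExtraArgMask)				// handler takes more than four arguments
			extraArgs = savedFrame + 4;					// r2 -> saved R4.. on the exception stack
		if (flags & KExecFlagPreprocess)				// e.g. translate a handle before the call
			PreprocessSketch(savedFrame);				// the indirect call is in the lines elided above
		}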
...
 
 	asm("slow_swi_exit: ");
 	__ASM_CLI();
 	asm("ldr	r4, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
 	asm("tst	r11, #0x0f ");				// returning to user mode?
-	asm("bne	slow_swi_exit2 ");			// no
+	asm("bne	slow_swi_exit2 ");			// not returning to user mode; in this case we don't run callbacks
+											// and the UMG was not changed on entry so we don't reset it
+
 #ifdef __CHECK_LOCK_STATE__
 	asm("bl "	CSM_CFUNC(check_lock_state));
 #endif
 	asm("cmp	r4, #3 ");					// callbacks?
-	asm("blhs	run_user_mode_callbacks ");	// yes
-	USER_MEMORY_GUARD_OFF(,r12,r12);
+	asm("blhs	run_user_mode_callbacks ");	// run them; NB trashes most registers (R0-R12, R14)
+	USER_MEMORY_GUARD_OFF(,r12,r12);		// because we're returning to user mode
+
 	asm("slow_swi_exit2: ");
-	RECORD_STATE;
+	RECORD_STATE();
 	asm("ldmia	sp, {r0-r14}^ ");			// R0=return value, restore R1-R12, R13_usr, R14_usr
 	asm("nop ");							// don't access banked register immediately after
 	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
+	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
 	RFEIAW(13);								// restore PC and CPSR - return from Exec function
 	}
 
 
 /******************************************************************************
  * IRQ Postamble
  * This routine is called after the IRQ has been dispatched
  * Enter in mode_sys
  * R4->TSubScheduler, R6->GIC CPU interface
- * If user memory guards active and not nested, R8 = saved DACR
  * For nested IRQ, R0-R12, R14_sys, return address, return CPSR are on top
  *	 of the mode_sys (i.e. current) stack
  * For non-nested IRQ, registers are saved on top of mode_svc stack and
  *	 pointed to by R5 in the order:
  *	 R5->R0 ... R12 R13_usr R14_usr <spare> PC CPSR
+ *   and if user memory guards are active, R8 = saved DACR
  ******************************************************************************/
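Before the assembler, a hedged C-like summary of the control flow __ArmVectorIrq() implements below. Field names are the ones referenced in the code; the struct layout, the helpers and the usr/svc mode test (which partly falls in elided lines) are illustrative only:

	#include <cstdint>

	struct TSubSchedulerSketch		// only the fields touched below; illustrative layout
		{
		int				i_IrqNestCount;
		unsigned char	iEventHandlersPending;
		int				iKernLockCount;
		int				iRescheduleNeededFlag;
		};

	void RunEventHandlers(TSubSchedulerSketch*);	// all four helpers are stand-ins
	void RestoreUserMemoryGuard(std::uint32_t savedDacr);
	void RescheduleAndSendIpis();
	void RunQueuedUserModeCallbacks();

	void ArmVectorIrqSketch(TSubSchedulerSketch& ss, std::uint32_t interruptedCpsr, std::uint32_t savedDacr)
		{
		if (--ss.i_IrqNestCount >= 0)
			return;									// nested IRQ: pop R0-R12, R14_sys and RFE ("nested_irq_exit")
		if (ss.iEventHandlersPending)
			RunEventHandlers(&ss);					// "bl run_event_handlers"
		std::uint32_t mode = interruptedCpsr & 0x1f;
		bool usrOrSvc = (mode == 0x10 || mode == 0x13);		// test partly in the elided lines
		if (!usrOrSvc || ss.iKernLockCount != 0 || !ss.iRescheduleNeededFlag)
			{										// "irq_kernel_locked_exit"
			RestoreUserMemoryGuard(savedDacr);		// USER_MEMORY_GUARD_RESTORE(r11,r12)
			return;									// RFE back to the interrupted code
			}
		ss.iKernLockCount = 1;						// lock the kernel, switch to mode_svc...
		RescheduleAndSendIpis();					// ...reschedule (elided) and send pending resched IPIs
		if ((interruptedCpsr & 0x0f) == 0)			// interrupted user mode?
			RunQueuedUserModeCallbacks();			// "cmp r4, #3" / "blhs run_user_mode_callbacks"
		RestoreUserMemoryGuard(savedDacr);			// then RFE, with USER_MEMORY_GUARD_CHECK() on the way out
		}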
 
 extern "C" __NAKED__ void __ArmVectorIrq()
 	{
 	// Interrupts may be enabled here
...
 	asm("ldr	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
 	asm("ldrb	r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iEventHandlersPending));
 	__DATA_MEMORY_BARRIER_Z__(r2);
 #ifdef BTRACE_CPU_USAGE
 	asm("ldrb	r10, [r10] ");
-#endif
-#ifdef __USER_MEMORY_GUARDS_ENABLED__
-	asm("mov	r11, r8 ");
 #endif
 	asm("subs	r7, r7, #1 ");
 	asm("bpl	nested_irq_exit ");
 	asm("cmp	r0, #0 ");
 	asm("beq	no_event_handlers ");
 	asm("mov	r0, r4 ");
 	asm("bl		run_event_handlers ");
 
 	asm("no_event_handlers: ");
+#ifdef __USER_MEMORY_GUARDS_ENABLED__
+	asm("mov	r11, r8 ");
+#endif
 	asm("ldr	r8, [r5, #%a0]" : : "i" _FOFF(SThreadExcStack,iCPSR));	// r8 = interrupted cpsr
 	asm("ldr	r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
 	__ASM_CLI();							// all interrupts off
 	asm("and	r2, r8, #0x1f ");
 	asm("ldr	r1, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
...
 	asm("cmpeq	r0, #0 ");					// if mode_usr or mode_svc, is kernel locked?
 	asm("str	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
 	asm("bne	irq_kernel_locked_exit ");	// if neither or if kernel locked, exit immediately
 	asm("cmp	r1, #0 ");					// If not, IDFCs/reschedule pending?
 	asm("beq	irq_kernel_locked_exit ");	// if not, exit
+
 	asm("mov	r1, #1 ");
 	asm("str	r1, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));	// lock the kernel
 	__ASM_STI_MODE(MODE_SVC);				// mode_svc, interrupts on
 
 	// Saved registers are on top of mode_svc stack
...
 
 	// need to send any outstanding reschedule IPIs
 	asm("cmp	r12, #0 ");
 	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
 	asm("tst	r8, #0x0f ");				// returning to user mode?
-	asm("bne	irq_post_resched_exit ");	// no - just return
+	asm("bne	irq_post_resched_exit ");	// if not, we don't check locks or run callbacks
+
 #ifdef __CHECK_LOCK_STATE__
 	asm("bl "	CSM_CFUNC(check_lock_state));
 #endif
 	asm("cmp	r4, #3 ");					// callbacks?
-	asm("blhs	run_user_mode_callbacks ");	// yes - run them
+	asm("blhs	run_user_mode_callbacks ");	// run them; NB trashes most registers (R0-R12, R14)
 
 	asm("irq_post_resched_exit: ");
-	asm("ldmfd	sp!, {r0,lr} ");			// restore UMG, lr_svc
-	USER_MEMORY_GUARD_RESTORE(r0,r12);
-	RECORD_STATE;
+	asm("ldmfd	sp!, {r11,lr} ");			// restore UMG, lr_svc
+	USER_MEMORY_GUARD_RESTORE(r11,r12);
+	RECORD_STATE();
 	asm("ldmia	sp, {r0-r14}^ ");			// restore R0-R12, R13_usr, R14_usr
 	asm("nop ");							// don't access banked register immediately after
 	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
-	RFEIAW(13);								// restore PC and CPSR - return from interrupt
+	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
+	RFEIAW(13);								// restore PC and CPSR - return from Exec function
 
 	asm("irq_kernel_locked_exit: ");
 #ifdef __CHECK_LOCK_STATE__
 	asm("tst	r8, #0x0f ");
 	asm("bleq " CSM_CFUNC(check_lock_state));
 #endif
-	USER_MEMORY_GUARD_RESTORE(r11,r12);
 #ifdef BTRACE_CPU_USAGE
 	asm("cmp	r10, #0 ");
 	asm("blne	btrace_irq_exit ");
 #endif
+	USER_MEMORY_GUARD_RESTORE(r11,r12);
 	__ASM_CLI_MODE(MODE_SVC);				// mode_svc, interrupts off
-	RECORD_STATE;
+	RECORD_STATE();
 	asm("ldmia	sp, {r0-r14}^ ");			// restore R0-R12, R13_usr, R14_usr
 	asm("nop ");							// don't access banked register immediately after
 	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
-	RFEIAW(13);								// restore PC and CPSR - return from interrupt
+	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
+	RFEIAW(13);								// restore PC and CPSR - return from Exec function
 
 	asm("nested_irq_exit: ");
 	__ASM_CLI1();
 	asm("str	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
 #ifdef BTRACE_CPU_USAGE
 	asm("cmp	r10, #0 ");
 	asm("blne	btrace_irq_exit ");
 #endif
 	asm("ldmia	sp!, {r0-r12,r14} ");		// restore r0-r12, r14_sys
-	RFEIAW(13);								// restore PC and CPSR
+	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
+	RFEIAW(13);								// restore PC and CPSR - return from Exec function
 
 	asm("__BTraceCpuUsageFilter: ");
 	asm(".word	%a0" : : "i" ((TInt)&BTraceData.iFilter[BTrace::ECpuUsage]));
 	}
 
 
 /******************************************************************************
  * FIQ Postamble
  * This routine is called after the FIQ has been dispatched
  * spsr_fiq, r0-r3 are unmodified
- * Return address is on the top of the FIQ stack
+ * Return address is on the top of the FIQ stack -- except that if user memory
+ * guards are in use, the saved DACR was pushed afterwards, so that's on top
+ * of the stack and the return address is next
  ******************************************************************************/
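In outline, the revised FIQ stack layout and epilogue (pop order taken from the code below; this is a descriptive summary, not additional source):

	// Sketch: FIQ exit when __USER_MEMORY_GUARDS_ENABLED__ is defined.
	//   sp_fiq -> [ saved DACR  ]   <- pushed by the FIQ preamble after the return address
	//             [ return addr ]
	// The epilogue pops the DACR first ("ldr r8, [sp], #4"), restores the guard with
	// USER_MEMORY_GUARD_RESTORE(r8,r12), and only then pops the PC ("ldmfd sp!, {pc}^").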
 
 extern "C" __NAKED__ void __ArmVectorFiq()
 	{
 #ifdef __FAULT_ON_FIQ__
...
 	asm("bl		btrace_fiq_exit ");
 	asm("ldmfd	sp!, {r0-r3} ");
 	asm("1: ");
 #endif
 #ifdef __USER_MEMORY_GUARDS_ENABLED__
-	asm("ldr	r11, [sp], #4 ");
-	USER_MEMORY_GUARD_RESTORE(r11,r12);
-#endif
+	asm("ldr	r8, [sp], #4 ");
+#endif
+	USER_MEMORY_GUARD_RESTORE(r8,r12);
 	asm("ldmfd	sp!, {pc}^ ");				// return from interrupt
 
 	asm("__TheScheduler: ");
 	asm(".word TheScheduler ");
 	}
...
 	asm("bl		hw_init_exc ");
 	asm("add	sp, sp, #20 ");
 	asm("ldmia	sp, {r0-r14}^ ");			// restore R0-R12, R13_usr, R14_usr
 	asm("nop ");							// don't access banked register immediately after
 	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
-	RFEIAW(13);								// restore PC and CPSR - return from interrupt
+	RFEIAW(13);								// restore PC and CPSR - return from Exec function
 #endif
+
 	asm("ldr	r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iCPSR));
 	asm("mrs	r12, cpsr ");
 	asm("and	r3, r0, #0x1f ");			// r3=processor mode when abort occurred
 	asm("bic	r12, r12, #0xc0 ");
 	asm("cmp	r3, #0x10 ");				// aborted in user mode?
...
 	asm("undef_coproc_thumb: ");
 	asm("undef_coproc_arm: ");
 	asm("mov	r0, sp ");
 	asm("bl "	CSM_CFUNC(HandleSpecialOpcode));
 	asm("cmp	r0, #0 ");
-	asm("beq	exc_dispatch ");			// if not handled, dispatch normally
-											// else return
+	asm("beq	exc_dispatch ");			// if not handled, dispatch normally, else return
+
 	// return from exception
 	// R4 points to current thread, R11->TSubScheduler, SP->TArmExcInfo
 	asm("exc_return: ");
 	__ASM_CLI();
 	asm("ldr	r0, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iCpsr));
 	asm("ldr	r1, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
 	asm("mov	r9, r4 ");
 	asm("tst	r0, #0x0f ");				// returning to user mode?
-	asm("bne	exc_return2 ");				// no
+	asm("bne	exc_return2 ");				// if not, we don't check locks or run callbacks
+
 #ifdef __CHECK_LOCK_STATE__
-	asm("bleq " CSM_CFUNC(check_lock_state));
+	asm("bl " CSM_CFUNC(check_lock_state));
 #endif
 	asm("cmp	r1, #3 ");					// callbacks?
-	asm("blhs	run_user_mode_callbacks ");	// yes - run them
-	RECORD_STATE_EXC;
+	asm("blo	exc_return2 ");
+	asm("stmfd	sp!, {r6} ");																		\
+	asm("bl		run_user_mode_callbacks ");	// run them; NB trashes most registers (R0-R12, R14)
+	asm("ldmfd	sp!, {r6} ");																		\
+
+	asm("exc_return2: ");
+	RECORD_STATE_EXC();
 	USER_MEMORY_GUARD_RESTORE(r6,r0);
 
-	asm("exc_return2: ");
 	asm("add	r7, sp, #%a0" : : "i" _FOFF(TArmExcInfo,iSpsrSvc));	// r7->saved spsr_svc
 	asm("ldmia	r7!, {r0-r2,r14} ");		// r0=original spsr_svc, r2=original sp_svc, restore lr_svc
 	asm("add	r6, sp, #%a0" : : "i" _FOFF(TArmExcInfo,iR15));		// r6->saved PC, CPSR
 	asm("msr	spsr, r0 ");				// restore spsr_svc
 	asm("ldmia	r6, {r0,r1} ");
 	asm("stmdb	r2!, {r0,r1} ");			// move saved PC, CPSR so sp_svc ends up at original place
 	asm("str	r2, [r6, #-4] ");			// overwrite iExcCode with original sp_svc - 8
 	asm("ldmia	r7, {r0-r14}^ ");			// restore R0-R12, R13_usr, R14_usr
 	asm("nop	");							// don't touch banked register immediately afterwards
 	asm("ldr	sp, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iExcCode));	// R13_svc = original R13_svc - 8
-	RFEIAW(13);								// restore R13_svc and return from exception
+	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
+	RFEIAW(13);								// restore PC and CPSR - return from Exec function
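One inference worth recording about the exc_return changes above (it is not stated in the changeset itself): run_user_mode_callbacks is documented as trashing R0-R12 and R14, while R6 still holds the saved DACR value that USER_MEMORY_GUARD_RESTORE(r6,r0) consumes afterwards, so the new code brackets the call with stmfd/ldmfd of {r6} to preserve that value across the callback run.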
 
 	// get here if exception occurred in mode other than usr or svc
 	// we are in mode_abt or mode_und with IRQs disabled
 	// R0=original CPSR R10->saved registers on exception stack R11->TSubScheduler
 	// R12=processor mode of exception (abt/und)
...
  * On entry:
  *		CPU in mode_svc, interrupts disabled, kernel unlocked, thread not in CS
  *		R9 points to current NThread
  *		We know there is at least one callback on the list
  *		Stack not necessarily 8 byte aligned
+ *		User memory guards on (if in use)
  * On return:
  *		CPU in mode_svc, interrupts disabled, kernel unlocked, thread not in CS
- *		No TUserModeCallbacks outstanding at the point where interrupts were
- *		disabled.
+ *		No TUserModeCallbacks outstanding at the point where interrupts were disabled.
  *		R0-R12,R14 modified
  ******************************************************************************/
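A heavily hedged C++ sketch of the shape of the routine below. The middle of the loop is elided in this view, so the dequeue step and the exact exit tests are assumptions based on the visible labels and comments:

	struct NThreadSketch			// illustrative: only what the sketch touches
		{
		void*	iUserModeCallbacks;	// list head; small values act as sentinels
		};

	void EnableInterrupts();					// __ASM_STI()
	void DisableInterrupts();					// __ASM_CLI()
	void TakeAndRunCallbacks(NThreadSketch*);	// assumed: detach the list and call each callback
	bool MoreCallbacksQueued(NThreadSketch*);	// assumed: re-check done with interrupts off
	bool CsFunctionOutstanding(NThreadSketch*);	// assumed
	void ThreadLeaveCSSketch();					// NKern::ThreadLeaveCS() in the real code

	void RunUserModeCallbacksSketch(NThreadSketch* t)	// R9 -> current NThread on entry
		{
		EnableInterrupts();
		for (;;)
			{
			TakeAndRunCallbacks(t);				// the elided middle of the routine
			DisableInterrupts();
			if (MoreCallbacksQueued(t))			// label "4": more were queued, loop round
				{ EnableInterrupts(); continue; }
			if (CsFunctionOutstanding(t))		// label "5": leave the critical section to run it,
				{ EnableInterrupts(); ThreadLeaveCSSketch(); DisableInterrupts(); continue; }	// then re-check
			return;								// restore sp (r10) and return via r11, interrupts off
			}
		}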
 extern "C" __NAKED__ void DoRunUserModeCallbacks()
 	{
 	asm(".global run_user_mode_callbacks ");
 	asm("run_user_mode_callbacks: ");
 
-#ifdef __USER_MEMORY_GUARDS_ENABLED__
-	asm("mrc p15, 0, r12, c3, c0, 0 ");
-	asm("tst r12, #0xc0000000 ");
-	asm("cdpne p15, 0, c0, c0, c0, 0 ");
-#endif
+	USER_MEMORY_GUARD_ASSERT_ON(r12);
+
 #ifdef __CHECK_LOCK_STATE__
 	asm("ldr	r0,	[r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsCount));
 	asm("cmp	r0, #0 ");
 	asm("beq	0f ");
 	__ASM_CRASH();
 #endif
+
 	asm("0:		");
-	__ASM_STI();
+	__ASM_STI();					// enable interrupts
 	asm("mov	r10, sp ");			// save stack pointer
 	asm("mov	r11, lr ");			// save return address
 	asm("add	r8, r9, #%a0" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
 	asm("mov	r0, #1 ");			// shouldn't have been in CS to begin with
 	asm("bic	sp, sp, #4 ");		// align stack to 8 byte boundary
...
 	asm("mov	sp, r10 ");			// restore stack pointer
 	__JUMP(,	r11);
 
 	// more callbacks have been queued so loop round and do them
 	asm("4:		");
-	__ASM_STI();
+	__ASM_STI();					// enable interrupts
 	asm("b		1b ");
 
 	// CsFunction outstanding so do it
 	asm("5:		");
-	__ASM_STI();
+	__ASM_STI();					// enable interrupts
 	asm("bl		ThreadLeaveCS__5NKern ");
 	__ASM_CLI();					// turn off interrupts
 	__DATA_MEMORY_BARRIER__(r6);
 	asm("ldr	r1, [r8] ");
 	asm("mov	sp, r10 ");