kernel/eka/nkernsmp/arm/vectors.cia
       
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\arm\vectors.cia
//
//

#include <e32cia.h>
#include <arm.h>
#include <arm_gic.h>
#include <arm_scu.h>
#include <arm_tmr.h>

void FastMutexNestAttempt();
void FastMutexSignalError();
extern "C" void ExcFault(TAny*);

extern "C" void send_accumulated_resched_ipis();

extern "C" TInt HandleSpecialOpcode(TArmExcInfo* aContext, TInt aType);

extern "C" {
extern TUint32 CrashStateOut;
extern SFullArmRegSet DefaultRegSet;
}

#ifdef BTRACE_CPU_USAGE
extern "C" void btrace_irq_exit();
extern "C" void btrace_fiq_exit();
#endif

#ifdef _DEBUG
#define __CHECK_LOCK_STATE__
#endif

//#define __FAULT_ON_FIQ__
       
#ifdef __CHECK_LOCK_STATE__
/******************************************************************************
 * Check that the kernel is unlocked, no fast mutex is held and the thread
 * is not in a critical section when returning to user mode.
 ******************************************************************************/
extern "C" __NAKED__ void check_lock_state()
	{
	GET_RWNO_TID(,r12);
	asm("ldr	r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("cmp	r12, #0 ");
	asm("beq	1f ");
	__ASM_CRASH();
	asm("1:		");
	GET_RWNO_TID(,r12);
	asm("ldr	r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("ldr	r12, [r12, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
	asm("cmp	r12, #0 ");
	asm("beq	2f ");
	__ASM_CRASH();
	asm("2:		");
	GET_RWNO_TID(,r12);
	asm("ldr	r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("ldr	r12, [r12, #%a0]" : : "i" _FOFF(NThread,iCsCount));
	asm("cmp	r12, #0 ");
	asm("beq	3f ");
	__ASM_CRASH();
	asm("3:		");
	GET_RWNO_TID(,r12);
	asm("ldr	r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("ldrh	r12, [r12, #%a0]" : : "i" _FOFF(NSchedulable,iFreezeCpu));
	asm("cmp	r12, #0 ");
	asm("beq	4f ");
	__ASM_CRASH();
	asm("4:		");
	__JUMP(,lr);
	}
#endif
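// Illustrative only: the four invariants verified above, restated in C++ with
// a simplified, hypothetical view of the scheduler/thread fields (the real
// structures are defined in the nkern headers, and the assert macro is assumed
// to be the usual __NK_ASSERT_ALWAYS). Not part of the build.
#if 0
struct NThreadSketch { void* iHeldFastMutex; TInt iCsCount; TUint16 iFreezeCpu; };
struct TSubSchedulerSketch { TInt iKernLockCount; NThreadSketch* iCurrentThread; };

static void CheckLockStateSketch(TSubSchedulerSketch* aSS)
	{
	// Returning to user mode is only legal when...
	__NK_ASSERT_ALWAYS(aSS->iKernLockCount == 0);					// ...the kernel is unlocked,
	__NK_ASSERT_ALWAYS(aSS->iCurrentThread->iHeldFastMutex == 0);	// ...no fast mutex is held,
	__NK_ASSERT_ALWAYS(aSS->iCurrentThread->iCsCount == 0);			// ...the thread is not in a critical section,
	__NK_ASSERT_ALWAYS(aSS->iCurrentThread->iFreezeCpu == 0);		// ...and it has not frozen itself to this CPU.
	}
#endif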
       
//#define	__RECORD_STATE__
#ifdef __RECORD_STATE__
#define RECORD_STATE				\
	asm("ldr r3, [sp, #68] ");		\
	asm("mov r1, sp ");				\
	asm("bic r12, sp, #0xff ");		\
	asm("bic r12, r12, #0xf00 ");	\
	asm("add r12, r12, #24 ");		\
	asm("tst r3, #0x0f ");			\
	asm("addne pc, pc, #12 ");		\
	asm("ldmia r1!, {r2-r11} ");	\
	asm("stmia r12!, {r2-r11} ");	\
	asm("ldmia r1!, {r2-r9} ");		\
	asm("stmia r12!, {r2-r9} ")

#define RECORD_STATE_EXC			\
	asm("ldr r3, [sp, #92] ");		\
	asm("mov r12, sp ");			\
	asm("bic lr, sp, #0xff ");		\
	asm("bic lr, lr, #0xf00 ");		\
	asm("tst r3, #0x0f ");			\
	asm("addne pc, pc, #12 ");		\
	asm("ldmia r12!, {r0-r11} ");	\
	asm("stmia lr!, {r0-r11} ");	\
	asm("ldmia r12!, {r0-r11} ");	\
	asm("stmia lr!, {r0-r11} ");
#else
#define RECORD_STATE
#define RECORD_STATE_EXC
#endif
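// Illustrative only: what the RECORD_STATE macro above does, expressed in C++.
// When the saved CPSR shows a return to user mode, the 18-word SThreadExcStack
// frame is copied to a fixed offset (24 bytes) above the 4K-aligned base of the
// current supervisor stack page - presumably so the last user-side register set
// is easy to locate in a crash dump. A sketch, not part of the build.
#if 0
static void RecordStateSketch(SThreadExcStack* aFrame)
	{
	if (aFrame->iCPSR & 0x0f)
		return;											// not returning to user mode - nothing to record
	TUint32* src = (TUint32*)aFrame;
	TUint32* dst = (TUint32*)(((TLinAddr)aFrame & ~0xfffu) + 24);
	for (TInt i = 0; i < 18; ++i)
		dst[i] = src[i];								// R0-R12, R13_usr, R14_usr, iExcCode, PC, CPSR
	}
#endif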
       
/******************************************************************************
 * SWI Handler
 ******************************************************************************/

extern "C" __NAKED__ void __ArmVectorSwi()
	{
	// IRQs disabled, FIQs enabled here
	__ASM_CLI();							// all interrupts off
	SRSDBW(MODE_SVC);						// save return address and return CPSR to supervisor stack
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("stmia	sp, {r0-r14}^ ");			// save R0-R12, R13_usr, R14_usr
	asm("mov	r4, #%a0" : : "i" ((TInt)SThreadExcStack::ESvc));
	USER_MEMORY_GUARD_ON_IF_MODE_USR(r11);
	asm("ldr	r12, [lr, #-4] ");			// get SWI opcode
	GET_RWNO_TID(,r11);
	asm("str	r4, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));	// word describing exception type
	asm("movs	r12, r12, lsl #9 ");		// 512*SWI number into r12
	asm("adr	lr, fast_swi_exit ");
	asm("ldr	r9, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("bcc	slow_swi ");				// bit 23=0 for slow/unprot
	asm("mov	r1, r9 ");
	asm("beq	wait_for_any_request ");	// special case for Exec::WaitForAnyRequest
	asm("ldr	r2, [r1, #%a0]" : : "i" _FOFF(NThread,iFastExecTable));
	asm("ldr	r3, [r2], r12, lsr #7 ");	// r3=limit, r2->dispatch table entry
	asm("ldr	r2, [r2] ");				// r2->kernel function
	asm("cmp	r3, r12, lsr #9 ");			// r3-SWI number
	__JUMP(hi,	r2);						// if SWI number valid, call kernel function
	asm("mvn	r12, #0 ");					// put invalid SWI number into r12
	asm("b		slow_swi ");				// go through slow SWI routine to call invalid SWI handler
       
#ifndef __FAST_SEM_MACHINE_CODED__
	asm("wait_for_any_request: ");
	__ASM_STI();							// all interrupts on
	asm("b		WaitForAnyRequest__5NKern ");
#else
	asm(".global exec_wfar_wait ");
	asm("exec_wfar_wait: ");
	asm("mov	r2, #1 ");
	asm("str	r2, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));			// else lock the kernel
	__ASM_STI();
	asm("strb	r2, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));	// and set the reschedule flag
	asm("bl "	CSM_ZN10TScheduler10RescheduleEv );	// reschedule
	asm(".global exec_wfar_resched_return ");
	asm("exec_wfar_resched_return: ");
	asm("ldr	r4, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("mov	r9, r3 ");

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));

	asm(".global exec_wfar_finish ");
	asm("exec_wfar_finish: ");
	asm("mrs	r1, spsr ");
	asm("tst	r1, #0x0f ");
	asm("bne	fast_swi_exit2 ");		// not returning to user mode
#ifdef __CHECK_LOCK_STATE__
	asm("bl "	CSM_CFUNC(check_lock_state));
#endif
	asm("cmp	r4, #3 ");				// callbacks?
	asm("blhs	run_user_mode_callbacks ");
	USER_MEMORY_GUARD_OFF(,r12,r12);
	asm("b		fast_swi_exit2 ");
#endif

	asm("fast_swi_exit: ");
#if defined(__CHECK_LOCK_STATE__) || defined(__USER_MEMORY_GUARDS_ENABLED__)
	asm("mrs	r12, spsr ");
	asm("tst	r12, #0x0f ");
	asm("bne	1f ");
#ifdef __CHECK_LOCK_STATE__
	asm("bl "	CSM_CFUNC(check_lock_state));
#endif
	USER_MEMORY_GUARD_OFF(,r12,r12);
	asm("1: ");
#endif
	asm("fast_swi_exit2: ");
	RECORD_STATE;
	asm("ldmib	sp, {r1-r14}^ ");			// restore R1-R12, R13_usr, R14_usr
	asm("nop ");							// don't access banked register immediately after
	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	RFEIAW(13);								// restore PC and CPSR - return from Exec function
       
	asm("slow_swi: ");						// IRQs and FIQs off here
	__ASM_STI();							// all interrupts on
	asm("ldr	r4, [r9, #%a0]" : : "i" _FOFF(NThread,iSlowExecTable));
	asm("mrs	r11, spsr ");				// spsr_svc into r11
	asm("adr	lr, slow_swi_exit ");
	asm("ldr	r5, [r4, #-12] ");			// r5=limit
	asm("add	r6, r4, r12, lsr #6 ");		// r6->dispatch table entry
	asm("cmp	r5, r12, lsr #9 ");			// r5-SWI number
	asm("ldmhiia r6, {r5,r6} ");			// if SWI number OK, flags into r5, function addr into r6
	asm("ldrls	pc, [r4, #-8] ");			// if SWI number invalid, call invalid handler

	// Acquire system lock if necessary: warning - any scratch registers modified after __ArmVectorSwi()
	// function preamble will be restored after call to NKern::LockSystem() with stale values.
	asm("tst	r5, #%a0" : : "i" ((TInt)KExecFlagClaim));	// claim system lock?
	asm("beq	slow_swi_no_wait ");						// skip if not
	asm("bl "	CSM_ZN5NKern10LockSystemEv );
	asm("ldmia	sp, {r0-r3} ");
	asm("slow_swi_no_wait: ");

	// Check to see if extra arguments are needed.  Needs to be placed here because NKern::LockSystem()
	// will not preserve value of r2 and ldmia call will replace it with a stale copy.
	asm("tst	r5, #%a0" : : "i" ((TInt)KExecFlagExtraArgMask));	// extra arguments needed?
	asm("addne	r2, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR4));	// if so, point r2 at saved registers on stack

	asm("tst	r5, #%a0" : : "i" ((TInt)KExecFlagPreprocess));	// preprocess (handle lookup)? can use r4, r7, r8, r12, r0
	asm("mov	lr, pc ");
	asm("ldrne	pc, [r4, #-4] ");			// call preprocess handler if required
	asm("orr	r5, r9, r5, lsr #30 ");		// r5 = current NThread pointer with bits 0,1 = (flags & (KExecFlagRelease|KExecFlagClaim))>>30
	asm("mov	lr, pc ");
	__JUMP(,	r6);						// call exec function, preserve r5,r11
	asm("str	r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iR0));	// save return value
	asm("bic	r9, r5, #3 ");				// r9 = current NThread pointer
	asm("tst	r5, #%a0" : : "i" ((TInt)KExecFlagRelease>>30));		// release system lock?
	asm("blne "	CSM_ZN5NKern12UnlockSystemEv );

	asm("slow_swi_exit: ");
	__ASM_CLI();
	asm("ldr	r4, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("tst	r11, #0x0f ");				// returning to user mode?
	asm("bne	slow_swi_exit2 ");			// no
#ifdef __CHECK_LOCK_STATE__
	asm("bl "	CSM_CFUNC(check_lock_state));
#endif
	asm("cmp	r4, #3 ");					// callbacks?
	asm("blhs	run_user_mode_callbacks ");	// yes
	USER_MEMORY_GUARD_OFF(,r12,r12);
	asm("slow_swi_exit2: ");
	RECORD_STATE;
	asm("ldmia	sp, {r0-r14}^ ");			// R0=return value, restore R1-R12, R13_usr, R14_usr
	asm("nop ");							// don't access banked register immediately after
	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	RFEIAW(13);								// restore PC and CPSR - return from Exec function
	}
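// Illustrative only: the slow-exec dispatch policy implemented above, as C++.
// The exec-table flag names are the ones used in this file; the entry layout
// shown here (flags word followed by handler address, 8 bytes per entry, as
// implied by the "lsr #6" indexing) and the Call*Sketch() helpers are
// simplifying stand-ins, not part of the build.
#if 0
struct SSlowExecEntrySketch
	{
	TUint32 iFlags;				// KExecFlagClaim | KExecFlagRelease | KExecFlagPreprocess | extra-arg bits
	TLinAddr iFunction;			// address of the kernel-side handler
	};

static void CallPreprocessHandlerSketch(TUint32* aSavedRegs);
static TInt CallExecHandlerSketch(TLinAddr aFn, TUint32* aSavedRegs, TUint32* aExtraArgs);

static void SlowExecDispatchSketch(const SSlowExecEntrySketch& aEntry, TUint32* aSavedRegs)
	{
	if (aEntry.iFlags & KExecFlagClaim)
		NKern::LockSystem();							// handler must run with the system lock held
	TUint32* extraArgs = 0;
	if (aEntry.iFlags & KExecFlagExtraArgMask)
		extraArgs = aSavedRegs + 4;						// caller's r4... as saved in SThreadExcStack
	if (aEntry.iFlags & KExecFlagPreprocess)
		CallPreprocessHandlerSketch(aSavedRegs);		// e.g. translate a handle into an object pointer
	TInt r = CallExecHandlerSketch(aEntry.iFunction, aSavedRegs, extraArgs);
	if (aEntry.iFlags & KExecFlagRelease)
		NKern::UnlockSystem();							// drop the system lock on the way out
	aSavedRegs[0] = r;									// result goes back in the caller's R0
	}
#endif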
       

/******************************************************************************
 * IRQ Postamble
 * This routine is called after the IRQ has been dispatched
 * Enter in mode_sys
 * R4->TSubScheduler, R6->GIC CPU interface
 * If user memory guards active and not nested, R8 = saved DACR
 * For nested IRQ, R0-R12, R14_sys, return address, return CPSR are on top
 *	of the mode_sys (i.e. current) stack
 * For non-nested IRQ, registers are saved on top of mode_svc stack and
 *	pointed to by R5 in the order:
 *	R5->R0 ... R12 R13_usr R14_usr <spare> PC CPSR
 ******************************************************************************/

extern "C" __NAKED__ void __ArmVectorIrq()
	{
	// Interrupts may be enabled here
#ifdef BTRACE_CPU_USAGE
	asm("ldr	r10, __BTraceCpuUsageFilter ");
#endif
	asm("ldr	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
	asm("ldrb	r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iEventHandlersPending));
	__DATA_MEMORY_BARRIER_Z__(r2);
#ifdef BTRACE_CPU_USAGE
	asm("ldrb	r10, [r10] ");
#endif
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("mov	r11, r8 ");
#endif
	asm("subs	r7, r7, #1 ");
	asm("bpl	nested_irq_exit ");
	asm("cmp	r0, #0 ");
	asm("beq	no_event_handlers ");
	asm("mov	r0, r4 ");
	asm("bl		run_event_handlers ");

	asm("no_event_handlers: ");
	asm("ldr	r8, [r5, #%a0]" : : "i" _FOFF(SThreadExcStack,iCPSR));	// r8 = interrupted cpsr
	asm("ldr	r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	__ASM_CLI();							// all interrupts off
	asm("and	r2, r8, #0x1f ");
	asm("ldr	r1, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp	r2, #0x10 ");				// interrupted mode_usr ?
	asm("cmpne	r2, #0x13 ");				// if not, interrupted mode_svc ?
	asm("cmpeq	r0, #0 ");					// if mode_usr or mode_svc, is kernel locked?
	asm("str	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
	asm("bne	irq_kernel_locked_exit ");	// if neither or if kernel locked, exit immediately
	asm("cmp	r1, #0 ");					// If not, IDFCs/reschedule pending?
	asm("beq	irq_kernel_locked_exit ");	// if not, exit
	asm("mov	r1, #1 ");
	asm("str	r1, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));	// lock the kernel
	__ASM_STI_MODE(MODE_SVC);				// mode_svc, interrupts on

	// Saved registers are on top of mode_svc stack
	// reschedule - this also switches context if necessary
	// enter this function in mode_svc, interrupts on, kernel locked
	// exit this function in mode_svc, all interrupts off, kernel unlocked
	asm("irq_do_resched: ");
	asm("stmfd	sp!, {r11,lr} ");			// save user memory guard state, lr_svc
	asm("bl "	CSM_ZN10TScheduler10RescheduleEv);	// return with R3->current thread
	asm(".global irq_resched_return ");
	asm("irq_resched_return: ");
       
	asm("ldr	r8, [sp, #%a0]" : : "i" (_FOFF(SThreadExcStack,iCPSR)+8));		// have UMG, lr_svc on stack as well
	asm("ldr	r4, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("mov	r9, r3 ");

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
	asm("tst	r8, #0x0f ");				// returning to user mode?
	asm("bne	irq_post_resched_exit ");	// no - just return
#ifdef __CHECK_LOCK_STATE__
	asm("bl "	CSM_CFUNC(check_lock_state));
#endif
	asm("cmp	r4, #3 ");					// callbacks?
	asm("blhs	run_user_mode_callbacks ");	// yes - run them

	asm("irq_post_resched_exit: ");
	asm("ldmfd	sp!, {r0,lr} ");			// restore UMG, lr_svc
	USER_MEMORY_GUARD_RESTORE(r0,r12);
	RECORD_STATE;
	asm("ldmia	sp, {r0-r14}^ ");			// restore R0-R12, R13_usr, R14_usr
	asm("nop ");							// don't access banked register immediately after
	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	RFEIAW(13);								// restore PC and CPSR - return from interrupt

	asm("irq_kernel_locked_exit: ");
#ifdef __CHECK_LOCK_STATE__
	asm("tst	r8, #0x0f ");
	asm("bleq " CSM_CFUNC(check_lock_state));
#endif
	USER_MEMORY_GUARD_RESTORE(r11,r12);
#ifdef BTRACE_CPU_USAGE
	asm("cmp	r10, #0 ");
	asm("blne	btrace_irq_exit ");
#endif
	__ASM_CLI_MODE(MODE_SVC);				// mode_svc, interrupts off
	RECORD_STATE;
	asm("ldmia	sp, {r0-r14}^ ");			// restore R0-R12, R13_usr, R14_usr
	asm("nop ");							// don't access banked register immediately after
	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	RFEIAW(13);								// restore PC and CPSR - return from interrupt

	asm("nested_irq_exit: ");
	__ASM_CLI1();
	asm("str	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
#ifdef BTRACE_CPU_USAGE
	asm("cmp	r10, #0 ");
	asm("blne	btrace_irq_exit ");
#endif
	asm("ldmia	sp!, {r0-r12,r14} ");		// restore r0-r12, r14_sys
	RFEIAW(13);								// restore PC and CPSR

	asm("__BTraceCpuUsageFilter: ");
	asm(".word	%a0" : : "i" ((TInt)&BTraceData.iFilter[BTrace::ECpuUsage]));
	}
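// Illustrative only: the exit decision taken by the IRQ postamble above,
// written out in C++. The TSubScheduler field names are the ones used in this
// file; the two helpers (ReturnFromInterruptSketch, RescheduleSketch) are
// hypothetical stand-ins for the register-restore and TScheduler::Reschedule
// paths. Not part of the build.
#if 0
static void ReturnFromInterruptSketch();
static void RescheduleSketch();

static void IrqPostambleSketch(TSubScheduler* aSS, TUint32 aInterruptedCpsr)
	{
	TUint32 mode = aInterruptedCpsr & 0x1f;
	TBool preemptible = (mode == 0x10 || mode == 0x13);		// only usr and svc can be preempted here
	if (!preemptible || aSS->iKernLockCount || !aSS->iRescheduleNeededFlag)
		{
		ReturnFromInterruptSketch();						// nothing to do: restore registers and return
		return;
		}
	aSS->iKernLockCount = 1;								// lock the kernel...
	RescheduleSketch();										// ...reschedule (may switch threads), returns unlocked
	// ...then run any pending user-mode callbacks before finally dropping back to user mode
	ReturnFromInterruptSketch();
	}
#endif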
       

/******************************************************************************
 * FIQ Postamble
 * This routine is called after the FIQ has been dispatched
 * spsr_fiq, r0-r3 are unmodified
 * Return address is on the top of the FIQ stack
 ******************************************************************************/

extern "C" __NAKED__ void __ArmVectorFiq()
	{
#ifdef __FAULT_ON_FIQ__
	asm(".word 0xe7f10f10 ");
#endif
	// IRQs and FIQs disabled here
	// r0-r7 are unaltered from when FIQ occurred
	GET_RWNO_TID(,r9);
	asm("mrs	r8, spsr ");				// check interrupted mode
	asm("and	r10, r8, #0x1f ");
	asm("cmp	r10, #0x10 ");				// check for mode_usr
	asm("ldr	r11, [r9, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("cmpne	r10, #0x13 ");				// or mode_svc
	asm("ldreq	r10, [r9, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmpeq	r11, #0 ");					// and check if kernel locked
	asm("bne	FiqExit0 ");				// if wrong mode or kernel locked, return immediately
	asm("cmp	r10, #0 ");					// check if reschedule needed
	asm("beq	FiqExit0 ");				// if not, return from interrupt

	// we interrupted mode_usr or mode_svc, kernel unlocked, reschedule needed
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr	r8, [sp], #4 ");			// r8_fiq = UMG state
#endif
	asm("ldr	r14, [sp], #4 ");			// r14_fiq = return address
	asm("add	r11, r11, #1 ");
	asm("str	r11, [r9, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));	// lock the kernel
	SRSDBW(MODE_SVC);						// save return address and return CPSR to supervisor stack
	CPSCHM(MODE_SVC);						// switch to mode_svc, all interrupts off
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("stmia	sp, {r0-r14}^ ");			// save R0-R12, R13_usr, R14_usr
	asm("mov	r0, #%a0" : : "i" ((TInt)SThreadExcStack::EFiq));
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	CPSCHM(MODE_FIQ);						// back to mode_fiq, all interrupts off
	asm("mov	r1, r8 ");					// retrieve UMG state
	CPSCHM(MODE_SVC);						// switch to mode_svc, all interrupts off
	asm("mov	r11, r1 ");					// UMG state into R11
#endif
	asm("str	r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));	// word describing exception type
	__ASM_STI();							// interrupts on
	asm("b		irq_do_resched ");			// do reschedule and return from interrupt

	asm("FiqExit0: ");
#ifdef BTRACE_CPU_USAGE
	asm("ldr	r8, __BTraceCpuUsageFilter ");
	asm("ldrb	r8, [r8] ");
	asm("cmp	r8, #0 ");
	asm("beq	1f ");
	asm("stmfd	sp!, {r0-r3} ");
	asm("bl		btrace_fiq_exit ");
	asm("ldmfd	sp!, {r0-r3} ");
	asm("1: ");
#endif
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr	r11, [sp], #4 ");
	USER_MEMORY_GUARD_RESTORE(r11,r12);
#endif
	asm("ldmfd	sp!, {pc}^ ");				// return from interrupt

	asm("__TheScheduler: ");
	asm(".word TheScheduler ");
	}
       

/******************************************************************************
 * Abort handler
 * This routine is called in response to a data abort, prefetch abort or
 * undefined instruction exception.
 ******************************************************************************/

extern "C" __NAKED__ void __ArmVectorAbortData()
	{
	__ASM_CLI();							// disable all interrupts
	asm("sub	lr, lr, #8 ");				// lr now points to aborted instruction
	SRSDBW(		MODE_ABT);					// save it along with aborted CPSR
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("stmia	sp, {r0-r14}^ ");			// save R0-R12, R13_usr, R14_usr
	GET_RWNO_TID(,r11);
	asm("mov	r1, #%a0 " : : "i" ((TInt)EArmExceptionDataAbort));
	asm("str	r1, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));	// word describing exception type

	asm("handle_exception: ");
	// We are in exception mode (abt/und) with registers stacked as follows:
	// R13_abt/R13_und -> R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13_usr R14_usr ExcCode PC CPSR
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
	CLREX									// reset exclusive monitor
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
	STREX(12,0,13);							// dummy STREX to reset exclusivity monitor
#endif

#if 0	// minimum-dependency exception handling
	asm("ldr	r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iCPSR));
	asm("mrs	r4, cpsr ");
	asm("orr	r1, r0, #0xc0 ");
	asm("msr	cpsr, r1 ");				// back to original mode
	asm("mov	r2, sp ");
	asm("mov	r3, lr ");
	asm("msr	cpsr, r4 ");				// back to mode_abt or mode_und
	asm("stmfd	sp!, {r2,r3} ");			// now have R13 R14 R0-R12 R13_usr R14_usr ExcCode PC CPSR
	asm("mrc	p15, 0, r1, c5, c0, 0 ");	// DFSR
	asm("mrc	p15, 0, r2, c5, c0, 1 ");	// IFSR
	asm("mrc	p15, 0, r0, c6, c0, 0 ");	// DFAR
	asm("stmfd	sp!, {r0-r2} ");			// now have DFAR DFSR IFSR R13 R14 R0-R12 R13_usr R14_usr ExcCode PC CPSR
	asm("mov	r0, sp ");
	asm(".extern hw_init_exc ");
	asm("bl		hw_init_exc ");
	asm("add	sp, sp, #20 ");
	asm("ldmia	sp, {r0-r14}^ ");			// restore R0-R12, R13_usr, R14_usr
	asm("nop ");							// don't access banked register immediately after
	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	RFEIAW(13);								// restore PC and CPSR - return from interrupt
#endif
	asm("ldr	r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iCPSR));
	asm("mrs	r12, cpsr ");
	asm("and	r3, r0, #0x1f ");			// r3=processor mode when abort occurred
	asm("bic	r12, r12, #0xc0 ");
	asm("cmp	r3, #0x10 ");				// aborted in user mode?
	asm("cmpne	r3, #0x13 ");				// if not, aborted in mode_svc?
	asm("bne	fatal_exception_mode ");	// if neither, fault
	asm("cmp	r11, #0 ");
	asm("beq	fatal_exception_mode ");	// if subscheduler not yet set up, fault
	asm("ldr	r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	__ASM_STI();							// reenable interrupts - rescheduling disabled by mode_abt/mode_und
	asm("mov	r10, sp ");					// r10 points to saved registers
	asm("cmp	r5, #0 ");					// exception with kernel locked?
	asm("bne	fatal_exception_mode ");	// if so, fault
	asm("add	r5, r5, #1 ");				// lock the kernel
	asm("str	r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	CPSCHM(MODE_SVC);						// mode_svc, interrupts on, kernel locked

	asm("ldr	r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("add	r5, r5, #%a0" : : "i" _FOFF(NThread,iStackBase));
	asm("ldmia	r5, {r2,r5} ");				// r2=supervisor stack area base, r5=size
	asm("subs	r2, sp, r2 ");				// r2=amount of mode_svc stack remaining
	asm("blo	fatal_exception_stack ");	// if stack pointer invalid, fault
	asm("cmp	r2, r5 ");
	asm("bhi	fatal_exception_stack ");
	asm("cmp	r2, #128 ");				// check enough stack to handle exception
	asm("blo	fatal_exception_stack ");	// if not, fault
       

	// At this point we are in mode_svc with interrupts enabled and the kernel locked.
	// We know the supervisor stack is valid and has enough free space to store the exception info.
	// Registers: R0=aborted cpsr, R10 points to saved registers, R11->TSubScheduler
	// on mode_abt or mode_und stack, R12 holds mode of exception (mode_abt or mode_und).

	asm("add	r1, r10, #%a0" : : "i" _FOFF(SThreadExcStack,iR8));
	asm("ldmia	r1, {r0-r9} ");				// get saved R8,R9,R10,R11,R12,R13_usr,R14_usr,exccode,PC,CPSR
	__ASM_CLI();
	asm("mov	r12, sp ");					// save original R13_svc
	asm("bic	sp, sp, #4 ");				// align R13_svc to 8 byte boundary
	asm("stmfd	sp!, {r0-r9} ");			// save on supervisor stack
	asm("ldmia	r10, {r0-r6,r10} ");		// get saved R0-R7
	asm("stmfd	sp!, {r0-r6,r10} ");		// save on supervisor stack
											// leave R7=exccode, R8=aborted instruction address, R9=aborted CPSR
	asm("cmp	r7, #%a0 " : : "i" ((TInt)EArmExceptionUndefinedOpcode));
	asm("moveq	r0, #0x1b ");				// mode_und
	asm("movne	r0, #0x17 ");				// mode_abt
	asm("msr	cpsr, r0 ");				// mode_abt or mode_und, interrupts on
	asm("add	sp, sp, #%a0 " : : "i" ((TInt)sizeof(SThreadExcStack)));	// restore exception stack balance
	CPSCHM(MODE_SVC);						// back into mode_svc, interrupts on

	asm("ldr	r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("cmp	r7, #%a0 " : : "i" ((TInt)EArmExceptionPrefetchAbort));
	asm("mrceq	p15, 0, r1, c5, c0, 1 ");	// r1=instruction fault status
	asm("mrcne	p15, 0, r1, c5, c0, 0 ");	// r1=data fault status
#ifdef __CPU_ARM_HAS_CP15_IFAR
	asm("mrceq	p15, 0, r0, c6, c0, 2 ");	// r0 = IFAR fault address
#else
	asm("moveq	r0, r8 ");					// else fault address for prefetch abort = instruction address
#endif // __CPU_ARM_HAS_CP15_IFAR
	asm("mrcne	p15, 0, r0, c6, c0, 0 ");	// r0= DFAR fault address
	asm("mrs	r2, spsr ");				// r2 = spsr_svc
	asm("mov	r3, #0 ");					// spare word
											// r12 = original R13_svc
	asm("ldr	r5, [r4, #%a0]" : : "i" _FOFF(NThread,iHandlers));	// r5 -> SNThreadHandlers
	asm("stmfd	sp!, {r0-r3,r12,r14} ");	// save FAR, FSR, SPSR_SVC, 0, R13_svc, R14_svc

	USER_MEMORY_GUARD_ON(,r6,r0);

	// Now we can unlock the kernel and process the exception
	asm("bl "	CSM_ZN5NKern6UnlockEv );

	// R4 points to the current thread
	// Get the handler address
	asm("ldr	r5, [r5, #%a0]" : : "i" _FOFF(SNThreadHandlers,iExceptionHandler));	// r5 -> exception handler

	// Kernel is now unlocked so we can retrieve the opcode for an undefined instruction trap
	// We might take a page fault doing this but that's OK since the original instruction
	// fetch might have taken a page fault and we no longer have any more locks than were
	// held at that time.
	asm("cmp	r7, #%a0 " : : "i" ((TInt)EArmExceptionUndefinedOpcode));
	asm("beq	exc_undef ");

	// call the exception dispatcher
	asm("exc_dispatch: ");
	asm("mov	r1, r4 ");					// pass address of current thread
	asm("mov	r0, sp ");					// pass address of TArmExcInfo
	asm("adr	lr, exc_return ");
	__JUMP(,	r5);						// call exception handler

	// Undefined instruction - get the opcode
	// R4->current thread, R8=address of aborted instruction, R9=CPSR at time of abort, SP->TArmExcInfo
	asm("exc_undef: ");
	asm("tst	r9, #0x20 ");				// THUMB?
	asm("bne	exc_undef_thumb ");			// branch if so
	asm("tst	r9, #0x00800000 ");			// J=1 ?
	asm("bne	exc_dispatch ");			// T=0, J=1 -> dispatch normally
	asm("tst	r9, #0x0f ");				// ARM - mode_usr ?
	asm("ldrne	r0, [r8] ");				// If not, get opcode
	USER_MEMORY_GUARD_OFF(eq,r0,r0);
	asm("ldreqt r0, [r8] ");				// else get opcode with user permissions
	USER_MEMORY_GUARD_ON(eq,r1,r1);
	asm("str	r0, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iFaultStatus));	// save opcode

	// ARM opcode in R0 - check for coprocessor or special UNDEF opcode
	// Special undef *7F***F*
	asm("orr	r1, r0, #0xF000000F ");		// *7F***F* -> F7F***FF
	asm("orr	r1, r1, #0x000FF000 ");		// *7F***F* -> F7FFF*FF
	asm("orr	r1, r1, #0x00000F00 ");		// *7F***F* -> F7FFFFFF
	asm("cmn	r1, #0x08000001 ");			// check
	asm("moveq	r1, #32 ");
	asm("beq	special_undef_arm ");		// branch if special undef

	// Coprocessor *X***N** X=C,D,E		N=coprocessor number
	// Advanced SIMD F2****** F3****** F4X***** (X even)
	asm("and	r1, r0, #0x0F000000 ");		// *C****** -> 0C000000
	asm("add	r1, r1, #0xF4000000 ");		// *C****** -> 00000000
	asm("cmp	r1, #0x03000000 ");
	asm("movlo	r1, r0, lsr #8 ");
	asm("andlo	r1, r1, #0x0f ");			// r1 = coprocessor number
	asm("blo	undef_coproc_arm ");
	asm("add	r1, r0, #0x0E000000 ");		// F2****** -> 00******
	asm("cmp	r1, #0x02000000 ");
	asm("blo	undef_coproc_arm ");
	asm("cmp	r1, #0x03000000 ");
	asm("bhs	exc_dispatch ");			// if not coproc/AdvSIMD, dispatch normally
	asm("tst	r0, #0x00100000 ");
	asm("bne	exc_dispatch ");			// if not coproc/AdvSIMD, dispatch normally
	asm("mov	r1, #16 ");					// CP=16 for non-coprocessor AdvSIMD
	asm("b		undef_coproc_arm ");
       
	asm("exc_undef_thumb: ");
	asm("tst	r9, #0x0f ");				// THUMB - mode_usr ?
	USER_MEMORY_GUARD_OFF(eq,r0,r0);
	asm("ldreqbt r0, [r8], #1 ");			// yes - get low 8 bits
	asm("ldreqbt r1, [r8], #1 ");			// get high 8 bits
	USER_MEMORY_GUARD_ON(eq,r2,r2);
	asm("ldrneh	r0, [r8], #2 ");			// no - get first 16 bits of opcode
	asm("orreq	r0, r0, r1, lsl #8 ");		// user mode - r0 = first 16 bits of opcode
#ifdef __CPU_THUMB2
	// must check for a 32 bit instruction and get second half if necessary
	asm("cmp	r0, #0xe800 ");
	asm("blo	exc_undef_thumb_16 ");		// skip if 16 bit
	asm("tst	r9, #0x0f ");				// mode_usr ?
	USER_MEMORY_GUARD_OFF(eq,r1,r1);
	asm("ldreqbt r1, [r8], #1 ");			// yes - get low 8 bits
	asm("ldreqbt r2, [r8], #1 ");			// get high 8 bits
	USER_MEMORY_GUARD_ON(eq,r3,r3);
	asm("ldrneh	r1, [r8], #2 ");			// no - get second 16 bits of opcode
	asm("orreq	r1, r1, r2, lsl #8 ");		// user mode - r1 = second 16 bits of opcode
	asm("orr	r0, r1, r0, lsl #16 ");		// first half of opcode into top of R0
	asm("exc_undef_thumb_16: ");
#endif
	asm("str	r0, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iFaultStatus));	// save opcode

	// THUMB opcode in R0 - check for coprocessor operation or special UNDEF opcode
	// Special undef DE**, F7F*A***
	asm("sub	r1, r0, #0xde00 ");
	asm("cmp	r1, #0x100 ");
	asm("movlo	r1, #33 ");
	asm("blo	special_undef_thumb ");		// branch if THUMB1 special undef
	asm("orr	r1, r0, #0x000000FF ");		// F7F*A*** -> F7F*A*FF
	asm("orr	r1, r1, #0x00000F00 ");		// F7F*A*** -> F7F*AFFF
	asm("orr	r1, r1, #0x000F0000 ");		// F7F*A*** -> F7FFAFFF
	asm("add	r1, r1, #0x00005000 ");		// F7F*A*** -> F7FFFFFF
	asm("cmn	r1, #0x08000001 ");			// check
	asm("moveq	r1, #34 ");
	asm("beq	special_undef_thumb2 ");	// branch if THUMB2 special undef

	// Check for THUMB2 Coprocessor instruction
	// 111x 11yy xxxx xxxx | xxxx nnnn xxxx xxxx	nnnn=coprocessor number, yy=00,01,10
	// 111x 1111 xxxx xxxx | xxxx xxxx xxxx xxxx	Advanced SIMD
	// 1111 1001 xxx0 xxxx | xxxx xxxx xxxx xxxx	Advanced SIMD
	asm("orr	r1, r0, #0x10000000 ");
	asm("cmn	r1, #0x01000000 ");
	asm("movcs	r1, #16 ");					// CP=16 for non-coprocessor AdvSIMD
	asm("bcs	undef_coproc_thumb ");
	asm("cmp	r1, #0xFC000000 ");
	asm("movcs	r1, r0, lsr #8 ");
	asm("andcs	r1, r1, #0x0f ");			// r1 = coprocessor number
	asm("bcs	undef_coproc_thumb ");
	asm("and	r1, r0, #0xFF000000 ");
	asm("cmp	r1, #0xF9000000 ");
	asm("tsteq	r0, #0x00100000 ");
	asm("bne	exc_dispatch ");			// if not coproc/AdvSIMD, dispatch normally
	asm("mov	r1, #16 ");					// CP=16 for non-coprocessor AdvSIMD
       
	asm("special_undef_arm: ");
	asm("special_undef_thumb: ");
	asm("special_undef_thumb2: ");
	asm("undef_coproc_thumb: ");
	asm("undef_coproc_arm: ");
	asm("mov	r0, sp ");
	asm("bl "	CSM_CFUNC(HandleSpecialOpcode));
	asm("cmp	r0, #0 ");
	asm("beq	exc_dispatch ");			// if not handled, dispatch normally
											// else return
	// return from exception
	// R4 points to current thread, R11->TSubScheduler, SP->TArmExcInfo
	asm("exc_return: ");
	__ASM_CLI();
	asm("ldr	r0, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iCpsr));
	asm("ldr	r1, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("mov	r9, r4 ");
	asm("tst	r0, #0x0f ");				// returning to user mode?
	asm("bne	exc_return2 ");				// no
#ifdef __CHECK_LOCK_STATE__
	asm("bleq " CSM_CFUNC(check_lock_state));
#endif
	asm("cmp	r1, #3 ");					// callbacks?
	asm("blhs	run_user_mode_callbacks ");	// yes - run them
	RECORD_STATE_EXC;
	USER_MEMORY_GUARD_RESTORE(r6,r0);

	asm("exc_return2: ");
	asm("add	r7, sp, #%a0" : : "i" _FOFF(TArmExcInfo,iSpsrSvc));	// r7->saved spsr_svc
	asm("ldmia	r7!, {r0-r2,r14} ");		// r0=original spsr_svc, r2=original sp_svc, restore lr_svc
	asm("add	r6, sp, #%a0" : : "i" _FOFF(TArmExcInfo,iR15));		// r6->saved PC, CPSR
	asm("msr	spsr, r0 ");				// restore spsr_svc
	asm("ldmia	r6, {r0,r1} ");
	asm("stmdb	r2!, {r0,r1} ");			// move saved PC, CPSR so sp_svc ends up at original place
	asm("str	r2, [r6, #-4] ");			// overwrite iExcCode with original sp_svc - 8
	asm("ldmia	r7, {r0-r14}^ ");			// restore R0-R12, R13_usr, R14_usr
	asm("nop	");							// don't touch banked register immediately afterwards
	asm("ldr	sp, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iExcCode));	// R13_svc = original R13_svc - 8
	RFEIAW(13);								// restore R13_svc and return from exception
       
	// get here if exception occurred in mode other than usr or svc
	// we are in mode_abt or mode_und with IRQs disabled
	// R0=original CPSR R10->saved registers on exception stack R11->TSubScheduler
	// R12=processor mode of exception (abt/und)
	asm("fatal_exception_mode: ");
	asm("ldr	r2, __TheScheduler ");
	asm("ldr	lr, [r2, #%a0]" : : "i" _FOFF(TScheduler,iMonitorExceptionHandler));
	asm("cmp	lr, #0 ");
	__JUMP(ne,	lr);						// if crash debugger running, let it handle exception

	// get here if mode_svc stack has overflowed
	// we are in mode_svc with interrupts enabled and the kernel locked
	// R0=original CPSR R10->saved registers on exception stack R11->TSubScheduler
	// R12=processor mode of exception (abt/und)
	asm("fatal_exception_stack: ");
	asm("orr	r3, r12, #0xC0 ");
	asm("msr	cpsr, r3 ");				// back to exception mode, all interrupts off
	asm("mov	r2, r0 ");
	asm("cmp	r11, #0 ");
	asm("ldreq	r11, __SS0 ");
	asm("ldr	r0, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));	// pass in address of stored registers
	asm("cmp	r0, #0 ");
	asm("ldreq	r0, __DefaultRegs ");
	asm("bl "	CSM_ZN3Arm9SaveStateER14SFullArmRegSet );
	asm("ldmia	sp!, {r4-r9} ");			// get original R0-R5
	asm("stmia	r0!, {r4-r9} ");			// save original R0-R5
	asm("ldmia	sp!, {r4-r9} ");			// get original R6-R11
	asm("stmia	r0!, {r4-r9} ");			// save original R6-R11
	asm("ldmia	sp!, {r4-r9} ");			// get original R12 R13_usr R14_usr iExcCode PC CPSR
	asm("stmia	r0!, {r4-r6} ");			// save original R12 R13_usr R14_usr
	asm("sub	r0, r0, #60 ");				// R0 back to where it was (6+6+3 = 15 words saved)
	asm("str	r7, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iExcCode));
	asm("str	r8, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iR15));
	asm("str	r9, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iFlags));
	asm("mov	r1, #13 ");					// r1 = regnum
	asm("mrs	r2, cpsr ");				// r2 = mode
	asm("mov	r4, r0 ");
	asm("bl "	CSM_ZN3Arm3RegER14SFullArmRegSetim );	// r0 = pointer to exception mode R13
	asm("str	sp, [r0] ");				// save correct original value for exception mode R13

	// call the exception fault dispatcher
	asm("mov	r0, #0 ");
	asm("b		ExcFault ");

	asm("__SS0: ");
	asm(".word	%a0" : : "i" ((TInt)&TheSubSchedulers[0]));
	asm("__DefaultRegs: ");
	asm(".word	%a0" : : "i" ((TInt)&DefaultRegSet));
	}
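// Illustrative only: a rough C++ re-statement of the ARM-state opcode
// classification performed in exc_undef above (the THUMB path is analogous).
// It sketches the decode logic rather than reproducing the exact value passed
// to HandleSpecialOpcode in R1; the enum and function names are invented for
// this comment block and are not part of the build.
#if 0
enum TUndefClassSketch { ESpecialUndef, ECoprocessor, EAdvSimd, EDispatchNormally };

static TUndefClassSketch ClassifyArmUndefSketch(TUint32 aOpcode, TInt& aCpNum)
	{
	// Special undefined pattern *7F***F* - reported to HandleSpecialOpcode
	if ((aOpcode | 0xF00FFF0Fu) == 0xF7FFFFFFu)
		return ESpecialUndef;
	// Coprocessor instructions *C****** / *D****** / *E******: CP number is in bits 8-11
	TUint32 op1 = aOpcode & 0x0F000000u;
	if (op1 == 0x0C000000u || op1 == 0x0D000000u || op1 == 0x0E000000u)
		{
		aCpNum = (aOpcode >> 8) & 0x0f;
		return ECoprocessor;
		}
	// Advanced SIMD: F2******, F3******, or F4****** with bit 20 clear
	if (aOpcode >= 0xF2000000u && aOpcode < 0xF4000000u)
		return EAdvSimd;
	if (aOpcode >= 0xF4000000u && aOpcode < 0xF5000000u && !(aOpcode & 0x00100000u))
		return EAdvSimd;					// the code above reports these as CP 16
	return EDispatchNormally;				// not recognised - hand to the thread's exception handler
	}
#endif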
       
extern "C" __NAKED__ void __ArmVectorAbortPrefetch()
	{
	__ASM_CLI();							// disable all interrupts
	asm("sub	lr, lr, #4");				// lr now points to instruction whose prefetch was aborted
	SRSDBW(		MODE_ABT);					// save it along with aborted CPSR
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("stmia	sp, {r0-r14}^ ");			// save R0-R12, R13_usr, R14_usr
	GET_RWNO_TID(,r11);
	asm("mov	r1, #%a0 " : : "i" ((TInt)EArmExceptionPrefetchAbort));
	asm("str	r1, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));	// word describing exception type
	asm("b		handle_exception ");
	}

extern "C" __NAKED__ void __ArmVectorUndef()
	{
	__ASM_CLI();							// disable all interrupts
	asm("sub	lr, lr, #4");				// lr now points to undefined instruction
	SRSDBW(		MODE_UND);					// save it along with aborted CPSR
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("stmia	sp, {r0-r14}^ ");			// save R0-R12, R13_usr, R14_usr
	GET_RWNO_TID(,r11);
	asm("mov	r1, #%a0 " : : "i" ((TInt)EArmExceptionUndefinedOpcode));
	asm("str	r1, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));	// word describing exception type
	asm("mrs	r0, spsr ");				// r0=CPSR at time of exception
	asm("tst	r0, #0x20 ");				// exception in THUMB mode?
	asm("addne	lr, lr, #2 ");				// if so, correct saved return address
	asm("strne	lr, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("b		handle_exception ");
	}
       
/******************************************************************************
 * Kick other CPUs as necessary to process TGenericIPI
 ******************************************************************************/
extern "C" __NAKED__ void send_generic_ipis(TUint32 /*aMask*/)
	{
	asm("movs	r0, r0, lsl #16 ");		// CPU mask into bits 16-23 - any bits set in aMask?
	GET_RWNO_TID(ne,r3);
	asm("ldrne	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	__DATA_SYNC_BARRIER_Z__(r1);			// need DSB before sending any IPI
	asm("orrne	r0, r0, #%a0" : : "i" ((TInt)GENERIC_IPI_VECTOR));
	asm("strne	r0, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs if any
	__JUMP(,lr);
	}
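// Illustrative only: the value written to the GIC distributor's software
// interrupt register above, expressed in C++. GicDistributor, iSoftIrq and
// GENERIC_IPI_VECTOR are the names used in this file; the function itself is
// a simplified stand-in and is not part of the build.
#if 0
static void SendGenericIpisSketch(GicDistributor* aGicDist, TUint32 aCpuMask)
	{
	if (!aCpuMask)
		return;									// nothing to kick
	// Bits 16-23 select the target CPUs, the low bits select the interrupt ID.
	// A DSB is required before the write - see __DATA_SYNC_BARRIER_Z__ above.
	aGicDist->iSoftIrq = (aCpuMask << 16) | GENERIC_IPI_VECTOR;	// one write raises the IPI on every selected CPU
	}
#endif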
       
/******************************************************************************
 * Handle a crash IPI
 * Enter in mode_sys or mode_fiq
 *	If in mode_sys, R7 = nest count, in which case:
 *		If R7>0 nested IRQ so mode_sys stack contains R0...R12 R14sys PC CPSR
 *		If R7=0 first IRQ, R5 points to top of mode_svc stack, which contains
 *			R0...R12 R13usr R14usr iExcCode PC CPSR
 *	If in mode_fiq, FIQ stack contains R0...R7 R8usr...R14usr iExcCode PC CPSR
 ******************************************************************************/
extern "C" __NAKED__ void handle_crash_ipi()
	{
	GET_RWNO_TID(,r0);
	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));
	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iExcCode));
	asm("cmp	r0, #0 ");
	asm("bge	state_already_saved ");		// skip if this CPU has already saved its state (i.e. already crashed)
	GET_RWNO_TID(,r0);
	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));
	asm("bl "	CSM_ZN3Arm9SaveStateER14SFullArmRegSet );	// save machine state (NOTE: R0 trashed)
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iFlags));	// mode on entry
	asm("and	r1, r1, #0x1f ");
	asm("cmp	r1, #0x11 ");				// mode_fiq?
	asm("ldreq	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13Fiq));	// yes - take registers from FIQ stack
	asm("beq	1f ");
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR7));	// nest count
	asm("cmp	r1, #0 ");					// nested?
	asm("ldreq	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR5));	// no - take registers from SVC stack (R5 points to it)
	asm("beq	2f ");
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13));	// nested - take R0...R12 R14usr PC CPSR from mode_sys stack
	asm("ldmia	r1!, {r2-r11} ");
	asm("stmia	r0!, {r2-r11} ");			// save original R0-R9
	asm("ldmia	r1!, {r2-r7} ");			// R2=original R10, R3=orig R11, R4=orig R12 R5=orig R14usr R6=orig PC R7=orig CPSR
	asm("stmia	r0!, {r2-r4} ");			// save original R10-R12
	asm("stmia	r0!, {r1,r5,r6,r7} ");		// save original R13usr, R14usr, PC, CPSR
	asm("sub	r0, r0, #68 ");				// R0 back to i_Regs
	asm("mov	r4, r0 ");
	asm("b		0f ");
       
	asm("1:		");							// R1 points to R0...R12 R13usr R14usr iExcCode PC CPSR
	asm("ldmia	r1!, {r2-r11} ");
	asm("stmia	r0!, {r2-r11} ");			// save original R0-R9
	asm("ldmia	r1!, {r2-r9} ");			// R2=original R10, R3=orig R11, R4=orig R12 R5=orig R13usr R6=orig R14usr R8=orig PC R9=orig CPSR
	asm("stmia	r0!, {r2-r6,r8,r9} ");		// save original R10-R12 R13usr R14usr PC CPSR
	asm("sub	r0, r0, #68 ");				// R0 back to i_Regs
	asm("mov	r4, r0 ");
	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13Fiq));	// save original R13Fiq
	asm("b		0f ");

	asm("2:		");							// R1 points to R0...R12 R13usr R14usr iExcCode PC CPSR
	asm("ldmia	r1!, {r2-r11} ");
	asm("stmia	r0!, {r2-r11} ");			// save original R0-R9
	asm("ldmia	r1!, {r2-r9} ");			// R2=original R10, R3=orig R11, R4=orig R12 R5=orig R13usr R6=orig R14usr R8=orig PC R9=orig CPSR
	asm("stmia	r0!, {r2-r6,r8,r9} ");		// save original R10-R12 R13usr R14usr PC CPSR
	asm("sub	r0, r0, #68 ");				// R0 back to i_Regs
	asm("mov	r4, r0 ");
   866 	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13Svc));	// restore original R13Svc
       
	asm("0:		");
	asm("state_already_saved: ");
	__DATA_SYNC_BARRIER_Z__(r6);

	USER_MEMORY_GUARD_OFF(,r0,r0);
	asm("mov	r0, #0 ");
	asm("mov	r1, #0 ");
	asm("mov	r2, #0 ");
	asm("bl		NKCrashHandler ");		// call NKCrashHandler(0,0,0)

	__DATA_SYNC_BARRIER__(r6);
	GET_RWNO_TID(,r0);
	asm("ldr	r7, __CrashStateOut ");
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("7: ");
	LDREX(1,7);
	asm("bic	r1, r1, r2 ");
	STREX(3,1,7);						// atomic { CrashStateOut &= ~iCpuMask; }
	asm("cmp	r3, #0 ");
	asm("bne	7b ");
	asm("1: ");
	ARM_WFE;
	asm("b		1b ");					// all done, just wait to be reset

	asm("__CrashStateOut: ");
	asm(".word CrashStateOut ");
	}
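// Illustrative only: the LDREX/STREX loop above atomically clears this CPU's
// bit in CrashStateOut and then parks the CPU. A rough C++ equivalent, using
// the e32 atomic API as a stand-in for the explicit exclusive-monitor loop;
// not part of the build.
#if 0
static void AcknowledgeCrashSketch(TSubScheduler* aSS)
	{
	__e32_atomic_and_ord32(&CrashStateOut, ~aSS->iCpuMask);		// CrashStateOut &= ~iCpuMask, atomically
	for (;;)
		{
		// WFE - all done, just wait to be reset
		}
	}
#endif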
       

/******************************************************************************
 * Run TUserModeCallbacks when a thread is about to return to user mode
 *
 * On entry:
 *		CPU in mode_svc, interrupts disabled, kernel unlocked, thread not in CS
 *		R9 points to current NThread
 *		We know there is at least one callback on the list
 *		Stack not necessarily 8 byte aligned
 * On return:
 *		CPU in mode_svc, interrupts disabled, kernel unlocked, thread not in CS
 *		No TUserModeCallbacks outstanding at the point where interrupts were
 *		disabled.
 *		R0-R12,R14 modified
 ******************************************************************************/
extern "C" __NAKED__ void DoRunUserModeCallbacks()
	{
	asm(".global run_user_mode_callbacks ");
	asm("run_user_mode_callbacks: ");

#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("mrc p15, 0, r12, c3, c0, 0 ");
	asm("tst r12, #0xc0000000 ");
	asm("cdpne p15, 0, c0, c0, c0, 0 ");
#endif
#ifdef __CHECK_LOCK_STATE__
	asm("ldr	r0,	[r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsCount));
	asm("cmp	r0, #0 ");
	asm("beq	0f ");
	__ASM_CRASH();
#endif
	asm("0:		");
	__ASM_STI();
	asm("mov	r10, sp ");			// save stack pointer
	asm("mov	r11, lr ");			// save return address
	asm("add	r8, r9, #%a0" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("mov	r0, #1 ");			// shouldn't have been in CS to begin with
	asm("bic	sp, sp, #4 ");		// align stack to 8 byte boundary
	asm("str	r0,	[r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsCount));	// EnterCS()

	asm("1:		");
	LDREX(		7,8);				// r7 = iUserModeCallbacks
	asm("mov	r6, #0 ");
	STREX(		12,6,8);			// iUserModeCallbacks = 0 if not changed
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	__DATA_MEMORY_BARRIER__(r6);
       
	asm("2:		");
	asm("movs	r0, r7 ");			// r0 = pointer to callback
	asm("beq	3f ");				// branch out if reached end of list
	asm("ldmia	r7, {r7, r12} ");	// r7 = callback->iNext, r12 = callback->iFunc
	asm("mov	r1, #%a0" : : "i" ((TInt)KUserModeCallbackUnqueued));
	asm("str	r1, [r0, #0] ");	// callback->iNext = 1
	__DATA_MEMORY_BARRIER__(r6);
	asm("adr	lr, 2b ");			// return to beginning of loop
	asm("mov	r1, #%a0" : : "i" ((TInt)EUserModeCallbackRun));
	__JUMP(,	r12);				// (*callback->iFunc)(callback, EUserModeCallbackRun);

	asm("3:		");
	__ASM_CLI();					// turn off interrupts
	__DATA_MEMORY_BARRIER__(r6);
	asm("ldr	r0, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
	asm("ldr	r1, [r8] ");
	asm("cmp	r0, #0 ");			// anything to do in LeaveCS() ?
	asm("bne	5f ");				// if yes, jump to slow path
	asm("cmp	r1, #0 ");			// no - any more callbacks?
	asm("bne	4f ");

	// no more callbacks, no CsFunction so just LeaveCS() and return
	asm("str	r6,	[r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsCount));
	asm("mov	sp, r10 ");			// restore stack pointer
	__JUMP(,	r11);

	// more callbacks have been queued so loop round and do them
	asm("4:		");
	__ASM_STI();
	asm("b		1b ");

	// CsFunction outstanding so do it
	asm("5:		");
	__ASM_STI();
	asm("bl		ThreadLeaveCS__5NKern ");
	__ASM_CLI();					// turn off interrupts
	__DATA_MEMORY_BARRIER__(r6);
	asm("ldr	r1, [r8] ");
	asm("mov	sp, r10 ");
	asm("mov	lr, r11 ");
	asm("cmp	r1, #0 ");			// any more callbacks queued?
	asm("bne	0b ");				// yes - go right back to the beginning and do them
	__JUMP(,	r11);				// else return
	}
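// Illustrative only: the structure of the callback drain performed above, as
// C++. The names iUserModeCallbacks, iCsCount, iCsFunction, iNext, iFunc,
// KUserModeCallbackUnqueued, EUserModeCallbackRun and NKern::ThreadLeaveCS all
// appear in this file; AtomicSwapPtrSketch() is a hypothetical helper standing
// in for the LDREX/STREX loop. Not part of the build.
#if 0
static TUserModeCallback* AtomicSwapPtrSketch(TUserModeCallback* volatile* aList, TUserModeCallback* aNew);

static void RunUserModeCallbacksSketch(NThreadBase* aThread)
	{
	aThread->iCsCount = 1;									// EnterCS(): callbacks must not be interrupted by a thread kill
	for (;;)
		{
		// atomically detach the whole list, then run each callback with interrupts enabled
		TUserModeCallback* head = AtomicSwapPtrSketch(&aThread->iUserModeCallbacks, 0);
		while (head)
			{
			TUserModeCallback* next = head->iNext;
			head->iNext = (TUserModeCallback*)KUserModeCallbackUnqueued;	// mark it free for requeuing
			(*head->iFunc)(head, EUserModeCallbackRun);
			head = next;
			}
		// interrupts are disabled for the checks below
		if (aThread->iCsFunction)
			{
			NKern::ThreadLeaveCS();							// slow path: an action was deferred while we were in CS
			if (!aThread->iUserModeCallbacks)
				return;										// nothing new queued - done
			aThread->iCsCount = 1;							// new callbacks arrived: re-enter CS and drain again
			continue;
			}
		if (!aThread->iUserModeCallbacks)
			break;											// nothing deferred, nothing new queued - done
		// new callbacks were queued while we ran - go round again (still in CS)
		}
	aThread->iCsCount = 0;									// LeaveCS()
	}
#endif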
       