// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\arm\vectors.cia
//
//

#include <e32cia.h>
#include <arm.h>
#include <arm_gic.h>
#include <arm_scu.h>
#include <arm_tmr.h>

void FastMutexNestAttempt();
void FastMutexSignalError();
extern "C" void ExcFault(TAny*);

extern "C" void send_accumulated_resched_ipis();

extern "C" TInt HandleSpecialOpcode(TArmExcInfo* aContext, TInt aType);

extern "C" {
extern TUint32 CrashStateOut;
extern SFullArmRegSet DefaultRegSet;
}

#ifdef BTRACE_CPU_USAGE
extern "C" void btrace_irq_exit();
extern "C" void btrace_fiq_exit();
#endif
#ifdef _DEBUG
#define __CHECK_LOCK_STATE__
#endif

//#define __FAULT_ON_FIQ__

#ifdef __CHECK_LOCK_STATE__
/******************************************************************************
* Check that the kernel is unlocked, no fast mutex is held, the thread is not
* in a critical section and it has not frozen itself to the current CPU when
* returning to user mode.
******************************************************************************/
extern "C" __NAKED__ void check_lock_state()
    {
    GET_RWNO_TID(,r12);
    asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
    asm("cmp r12, #0 ");
    asm("beq 1f ");
    __ASM_CRASH();
    asm("1: ");
    GET_RWNO_TID(,r12);
    asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
    asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
    asm("cmp r12, #0 ");
    asm("beq 2f ");
    __ASM_CRASH();
    asm("2: ");
    GET_RWNO_TID(,r12);
    asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
    asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(NThread,iCsCount));
    asm("cmp r12, #0 ");
    asm("beq 3f ");
    __ASM_CRASH();
    asm("3: ");
    GET_RWNO_TID(,r12);
    asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
    asm("ldrh r12, [r12, #%a0]" : : "i" _FOFF(NSchedulable,iFreezeCpu));
    asm("cmp r12, #0 ");
    asm("beq 4f ");
    __ASM_CRASH();
    asm("4: ");
    __JUMP(,lr);
    }
#endif
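
// Illustrative sketch (not part of the build): in C, the assertions made by
// check_lock_state() above amount to the following. TSubScheduler and NThread
// are the real kernel types; SubScheduler() and Crash() are hypothetical names
// standing in for GET_RWNO_TID and __ASM_CRASH, used only for this sketch.
#if 0
inline void CheckLockStateSketch()
    {
    TSubScheduler& ss = SubScheduler();     // per-CPU data block
    if (ss.iKernLockCount != 0)
        Crash();                            // kernel must be unlocked
    NThread* t = ss.iCurrentThread;
    if (t->iHeldFastMutex)
        Crash();                            // no fast mutex may be held
    if (t->iCsCount != 0)
        Crash();                            // not in a critical section
    if (t->iFreezeCpu)
        Crash();                            // not frozen to this CPU
    }
#endif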

//#define __RECORD_STATE__
#ifdef __RECORD_STATE__
#define RECORD_STATE()                  \
    asm("ldr r3, [sp, #68] ");          \
    asm("mov r1, sp ");                 \
    asm("bic r12, sp, #0xff ");         \
    asm("bic r12, r12, #0xf00 ");       \
    asm("add r12, r12, #24 ");          \
    asm("tst r3, #0x0f ");              \
    asm("addne pc, pc, #12 ");          \
    asm("ldmia r1!, {r2-r11} ");        \
    asm("stmia r12!, {r2-r11} ");       \
    asm("ldmia r1!, {r2-r9} ");         \
    asm("stmia r12!, {r2-r9} ")

#define RECORD_STATE_EXC()              \
    asm("ldr r3, [sp, #92] ");          \
    asm("mov r12, sp ");                \
    asm("bic lr, sp, #0xff ");          \
    asm("bic lr, lr, #0xf00 ");         \
    asm("tst r3, #0x0f ");              \
    asm("addne pc, pc, #12 ");          \
    asm("ldmia r12!, {r0-r11} ");       \
    asm("stmia lr!, {r0-r11} ");        \
    asm("ldmia r12!, {r0-r11} ");       \
    asm("stmia lr!, {r0-r11} ");
#else
#define RECORD_STATE()
#define RECORD_STATE_EXC()
#endif

#ifdef __USER_MEMORY_GUARDS_ENABLED__
// This macro can be invoked just before a return-from-exception instruction.
// It will cause an UNDEF exception if we're about to return to user mode with UMG still on.
#define USER_MEMORY_GUARD_CHECK()                                   \
    asm("stmfd sp!, {lr}");                                         \
    asm("ldr lr, [sp, #8]");            /* lr<-future CPSR */       \
    USER_MEMORY_GUARD_ASSERT_OFF_IF_MODE_USR(lr);                   \
    asm("ldmfd sp!, {lr}");
#else
#define USER_MEMORY_GUARD_CHECK()
#endif
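
// Illustrative sketch (not part of the build): what USER_MEMORY_GUARD_CHECK()
// verifies, in pseudo-C. Just before the RFE, the CPSR to be restored sits one
// word above the return address, which is why the macro reads [sp, #8] after
// pushing lr. SavedCpsr(), GuardIsOn() and Undef() are hypothetical helpers
// standing in for the stack peek, the DACR test and the deliberate UNDEF.
#if 0
inline void UserMemoryGuardCheckSketch()
    {
    TUint32 futureCpsr = SavedCpsr();               // CPSR that RFE is about to restore
    if ((futureCpsr & 0x0f) == 0 && GuardIsOn())    // returning to mode_usr with the guard on?
        Undef();                                    // deliberate UNDEF - easy to spot in the debugger
    }
#endif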

/******************************************************************************
* SWI Handler
******************************************************************************/

extern "C" __NAKED__ void __ArmVectorSwi()
    {
    // IRQs disabled, FIQs enabled here
    __ASM_CLI();                            // all interrupts off
    SRSDBW(MODE_SVC);                       // save return address and return CPSR to supervisor stack
    asm("sub sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
    asm("stmia sp, {r0-r14}^ ");            // save R0-R12, R13_usr, R14_usr
    asm("mov r4, #%a0" : : "i" ((TInt)SThreadExcStack::ESvc));
    USER_MEMORY_GUARD_ON_IF_MODE_USR(r11);
    asm("ldr r12, [lr, #-4] ");             // get SWI opcode
    GET_RWNO_TID(,r11);
    asm("str r4, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));  // word describing exception type
    asm("movs r12, r12, lsl #9 ");          // 512*SWI number into r12
    asm("adr lr, fast_swi_exit ");
    asm("ldr r9, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
    asm("bcc slow_swi ");                   // bit 23=0 for slow/unprot
    asm("mov r1, r9 ");
    asm("beq wait_for_any_request ");       // special case for Exec::WaitForAnyRequest
    asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iFastExecTable));
    asm("ldr r3, [r2], r12, lsr #7 ");      // r3=limit, r2->dispatch table entry
    asm("ldr r2, [r2] ");                   // r2->kernel function
    asm("cmp r3, r12, lsr #9 ");            // r3-SWI number
    __JUMP(hi, r2);                         // if SWI number valid, call kernel function
    asm("mvn r12, #0 ");                    // put invalid SWI number into r12
    asm("b slow_swi ");                     // go through slow SWI routine to call invalid SWI handler

#ifndef __FAST_SEM_MACHINE_CODED__
    asm("wait_for_any_request: ");
    __ASM_STI();                            // all interrupts on
    asm("b WaitForAnyRequest__5NKern ");
#else
    asm(".global exec_wfar_wait ");
    asm("exec_wfar_wait: ");
    asm("mov r2, #1 ");
    asm("str r2, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));     // else lock the kernel
    __ASM_STI();
    asm("strb r2, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));     // and set the reschedule flag
    asm("bl " CSM_ZN10TScheduler10RescheduleEv );       // reschedule
    asm(".global exec_wfar_resched_return ");
    asm("exec_wfar_resched_return: ");
    asm("ldr r4, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
    asm("mov r9, r3 ");

    // need to send any outstanding reschedule IPIs
    asm("cmp r12, #0 ");
    asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));

    asm(".global exec_wfar_finish ");
    asm("exec_wfar_finish: ");
    asm("mrs r1, spsr ");
    asm("tst r1, #0x0f ");
    asm("bne fast_swi_exit2 ");             // not returning to user mode; in this case we don't run callbacks
                                            // and the UMG was not changed on entry so we don't reset it
#ifdef __CHECK_LOCK_STATE__
    asm("bl " CSM_CFUNC(check_lock_state));
#endif
    asm("cmp r4, #3 ");                     // callbacks?
    asm("blhs run_user_mode_callbacks ");   // run them; NB trashes most registers (R0-R12, R14)
    USER_MEMORY_GUARD_OFF(,r12,r12);        // because we're returning to user mode
    asm("b fast_swi_exit2 ");
#endif

    asm("fast_swi_exit: ");
#if defined(__CHECK_LOCK_STATE__) || defined(__USER_MEMORY_GUARDS_ENABLED__)
    asm("mrs r12, spsr ");
    asm("tst r12, #0x0f ");
    asm("bne fast_swi_exit2 ");             // not returning to user mode; in this case we don't run callbacks
                                            // and the UMG was not changed on entry so we don't restore it
#ifdef __CHECK_LOCK_STATE__
    asm("bl " CSM_CFUNC(check_lock_state));
#endif
    USER_MEMORY_GUARD_OFF(,r12,r12);        // because we're returning to user mode
#endif

    asm("fast_swi_exit2: ");
    RECORD_STATE();
    asm("ldmib sp, {r1-r14}^ ");            // restore R1-R12, R13_usr, R14_usr
    asm("nop ");                            // don't access banked register immediately after
    asm("add sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
    USER_MEMORY_GUARD_CHECK();              // check UMG is off if returning to user mode
    RFEIAW(13);                             // restore PC and CPSR - return from Exec function


    asm("slow_swi: ");                      // IRQs and FIQs off here
    __ASM_STI();                            // all interrupts on
    asm("ldr r4, [r9, #%a0]" : : "i" _FOFF(NThread,iSlowExecTable));
    asm("mrs r11, spsr ");                  // spsr_svc into r11
    asm("adr lr, slow_swi_exit ");
    asm("ldr r5, [r4, #-12] ");             // r5=limit
    asm("add r6, r4, r12, lsr #6 ");        // r6->dispatch table entry
    asm("cmp r5, r12, lsr #9 ");            // r5-SWI number
    asm("ldmhiia r6, {r5,r6} ");            // if SWI number OK, flags into r5, function addr into r6
    asm("ldrls pc, [r4, #-8] ");            // if SWI number invalid, call invalid handler, returning to slow_swi_exit below

    // Acquire system lock if necessary. Warning: any values written to the scratch
    // registers after the __ArmVectorSwi() preamble are lost here, because R0-R3 are
    // reloaded from the entry-time stack copies after the call to NKern::LockSystem().
    asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagClaim));     // claim system lock?
    asm("beq slow_swi_no_wait ");           // skip if not
    asm("bl " CSM_ZN5NKern10LockSystemEv );
    asm("ldmia sp, {r0-r3} ");              // reload original values
    asm("slow_swi_no_wait: ");

    // Check to see if extra arguments are needed. Needs to be placed after the call to
    // NKern::LockSystem() above, as r2 is reloaded with its original value by the
    // ldmia instruction above.
    asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagExtraArgMask));      // extra arguments needed?
    asm("addne r2, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR4));   // if so, point r2 at saved registers on stack

    asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagPreprocess));        // preprocess (handle lookup)? can use r4, r7, r8, r12, r0
    asm("mov lr, pc ");
    asm("ldrne pc, [r4, #-4] ");            // call preprocess handler if required
    asm("orr r5, r9, r5, lsr #30 ");        // r5 = current NThread pointer with bits 0,1 = (flags & (KExecFlagRelease|KExecFlagClaim))>>30
    asm("mov lr, pc ");
    __JUMP(, r6);                           // call exec function, preserve r5,r11
    asm("str r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iR0));   // save return value
    asm("bic r9, r5, #3 ");                 // r9 = current NThread pointer
    asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagRelease>>30));       // release system lock?
    asm("blne " CSM_ZN5NKern12UnlockSystemEv );

    asm("slow_swi_exit: ");
    __ASM_CLI();
    asm("ldr r4, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
    asm("tst r11, #0x0f ");                 // returning to user mode?
    asm("bne slow_swi_exit2 ");             // not returning to user mode; in this case we don't run callbacks
                                            // and the UMG was not changed on entry so we don't reset it
#ifdef __CHECK_LOCK_STATE__
    asm("bl " CSM_CFUNC(check_lock_state));
#endif
    asm("cmp r4, #3 ");                     // callbacks?
    asm("blhs run_user_mode_callbacks ");   // run them; NB trashes most registers (R0-R12, R14)
    USER_MEMORY_GUARD_OFF(,r12,r12);        // because we're returning to user mode

    asm("slow_swi_exit2: ");
    RECORD_STATE();
    asm("ldmia sp, {r0-r14}^ ");            // R0=return value, restore R1-R12, R13_usr, R14_usr
    asm("nop ");                            // don't access banked register immediately after
    asm("add sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
    USER_MEMORY_GUARD_CHECK();              // check UMG is off if returning to user mode
    RFEIAW(13);                             // restore PC and CPSR - return from Exec function
    }
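
// Illustrative sketch (not part of the build): the fast/slow dispatch above,
// expressed in C. "movs r12, r12, lsl #9" puts bit 23 of the SWI opcode (the
// fast-exec flag) into the carry flag, so bcc selects the slow path, and a
// zero result means fast exec number 0, the WaitForAnyRequest special case.
// FastLimit, FastFunction, SlowDispatch and InvalidFastExec are hypothetical
// simplifications of the iFastExecTable/iSlowExecTable lookups.
#if 0
void SwiDispatchSketch(TUint32 aSwiOpcode, NThread* aThread)
    {
    TUint32 imm = aSwiOpcode & 0x00ffffff;      // 24-bit SWI immediate
    if (imm & 0x00800000)                       // bit 23 set -> fast exec
        {
        TUint32 num = imm & 0x007fffff;
        if (num == 0)
            NKern::WaitForAnyRequest();         // fast exec 0 is special-cased
        else if (num < FastLimit(aThread))
            FastFunction(aThread, num)();       // dispatched with interrupts off
        else
            InvalidFastExec();                  // routed through the slow invalid handler
        }
    else                                        // slow exec: may claim/release the
        SlowDispatch(aThread, imm);             // system lock and take extra arguments
    }
#endif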


/******************************************************************************
* IRQ Postamble
* This routine is called after the IRQ has been dispatched
* Enter in mode_sys
* R4->TSubScheduler, R6->GIC CPU interface
* For nested IRQ, R0-R12, R14_sys, return address, return CPSR are on top
* of the mode_sys (i.e. current) stack
* For non-nested IRQ, registers are saved on top of mode_svc stack and
* pointed to by R5 in the order:
* R5->R0 ... R12 R13_usr R14_usr <spare> PC CPSR
* and if user memory guards are active, R8 = saved DACR
******************************************************************************/
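
// Illustrative sketch (not part of the build): the saved-register frame the
// handlers in this file build on the mode_svc stack. The field order matches
// the "<spare> PC CPSR" layout described above and the iR0/iR4/iR8/iExcCode/
// iR15/iCPSR offsets used throughout; treat it as an approximation of the
// real SThreadExcStack declared in the nkernsmp headers, with hypothetical
// member names for the parts this file never references by name.
#if 0
struct SThreadExcStackSketch
    {
    TUint32 iR0, iR1, iR2, iR3, iR4, iR5, iR6, iR7;
    TUint32 iR8, iR9, iR10, iR11, iR12;
    TUint32 iR13usr;    // R13_usr
    TUint32 iR14usr;    // R14_usr
    TUint32 iExcCode;   // ESvc/EFiq/abort code - the <spare> word
    TUint32 iR15;       // return address, consumed by RFEIAW(13)
    TUint32 iCPSR;      // return CPSR, pushed by SRSDBW
    };
#endif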

extern "C" __NAKED__ void __ArmVectorIrq()
    {
    // Interrupts may be enabled here
#ifdef BTRACE_CPU_USAGE
    asm("ldr r10, __BTraceCpuUsageFilter ");
#endif
    asm("ldr r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
    asm("ldrb r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iEventHandlersPending));
    __DATA_MEMORY_BARRIER_Z__(r2);
#ifdef BTRACE_CPU_USAGE
    asm("ldrb r10, [r10] ");
#endif
    asm("subs r7, r7, #1 ");
    asm("bpl nested_irq_exit ");
    asm("cmp r0, #0 ");
    asm("beq no_event_handlers ");
    asm("mov r0, r4 ");
    asm("bl run_event_handlers ");

    asm("no_event_handlers: ");
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("mov r11, r8 ");
#endif
    asm("ldr r8, [r5, #%a0]" : : "i" _FOFF(SThreadExcStack,iCPSR));     // r8 = interrupted cpsr
    asm("ldr r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    __ASM_CLI();                            // all interrupts off
    asm("and r2, r8, #0x1f ");
    asm("ldr r1, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
    asm("cmp r2, #0x10 ");                  // interrupted mode_usr ?
    asm("cmpne r2, #0x13 ");                // if not, interrupted mode_svc ?
    asm("cmpeq r0, #0 ");                   // if mode_usr or mode_svc, is kernel locked?
    asm("str r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
    asm("bne irq_kernel_locked_exit ");     // if neither or if kernel locked, exit immediately
    asm("cmp r1, #0 ");                     // If not, IDFCs/reschedule pending?
    asm("beq irq_kernel_locked_exit ");     // if not, exit

    asm("mov r1, #1 ");
    asm("str r1, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));     // lock the kernel
    __ASM_STI_MODE(MODE_SVC);               // mode_svc, interrupts on

    // Saved registers are on top of mode_svc stack
    // reschedule - this also switches context if necessary
    // enter this function in mode_svc, interrupts on, kernel locked
    // exit this function in mode_svc, all interrupts off, kernel unlocked
    asm("irq_do_resched: ");
    asm("stmfd sp!, {r11,lr} ");            // save user memory guard state, lr_svc
    asm("bl " CSM_ZN10TScheduler10RescheduleEv);        // return with R3->current thread
    asm(".global irq_resched_return ");
    asm("irq_resched_return: ");

    asm("ldr r8, [sp, #%a0]" : : "i" (_FOFF(SThreadExcStack,iCPSR)+8));     // have UMG, lr_svc on stack as well
    asm("ldr r4, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
    asm("mov r9, r3 ");

    // need to send any outstanding reschedule IPIs
    asm("cmp r12, #0 ");
    asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
    asm("tst r8, #0x0f ");                  // returning to user mode?
    asm("bne irq_post_resched_exit ");      // if not, we don't check locks or run callbacks

#ifdef __CHECK_LOCK_STATE__
    asm("bl " CSM_CFUNC(check_lock_state));
#endif
    asm("cmp r4, #3 ");                     // callbacks?
    asm("blhs run_user_mode_callbacks ");   // run them; NB trashes most registers (R0-R12, R14)

    asm("irq_post_resched_exit: ");
    asm("ldmfd sp!, {r11,lr} ");            // restore UMG, lr_svc
    USER_MEMORY_GUARD_RESTORE(r11,r12);
    RECORD_STATE();
    asm("ldmia sp, {r0-r14}^ ");            // restore R0-R12, R13_usr, R14_usr
    asm("nop ");                            // don't access banked register immediately after
    asm("add sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
    USER_MEMORY_GUARD_CHECK();              // check UMG is off if returning to user mode
    RFEIAW(13);                             // restore PC and CPSR - return from interrupt

    asm("irq_kernel_locked_exit: ");
#ifdef __CHECK_LOCK_STATE__
    asm("tst r8, #0x0f ");
    asm("bleq " CSM_CFUNC(check_lock_state));
#endif
#ifdef BTRACE_CPU_USAGE
    asm("cmp r10, #0 ");
    asm("blne btrace_irq_exit ");
#endif
    USER_MEMORY_GUARD_RESTORE(r11,r12);
    __ASM_CLI_MODE(MODE_SVC);               // mode_svc, interrupts off
    RECORD_STATE();
    asm("ldmia sp, {r0-r14}^ ");            // restore R0-R12, R13_usr, R14_usr
    asm("nop ");                            // don't access banked register immediately after
    asm("add sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
    USER_MEMORY_GUARD_CHECK();              // check UMG is off if returning to user mode
    RFEIAW(13);                             // restore PC and CPSR - return from interrupt

    asm("nested_irq_exit: ");
    __ASM_CLI1();
    asm("str r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
#ifdef BTRACE_CPU_USAGE
    asm("cmp r10, #0 ");
    asm("blne btrace_irq_exit ");
#endif
    asm("ldmia sp!, {r0-r12,r14} ");        // restore r0-r12, r14_sys
    USER_MEMORY_GUARD_CHECK();              // check UMG is off if returning to user mode
    RFEIAW(13);                             // restore PC and CPSR - return from interrupt

    asm("__BTraceCpuUsageFilter: ");
    asm(".word %a0" : : "i" ((TInt)&BTraceData.iFilter[BTrace::ECpuUsage]));
    }
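
// Illustrative sketch (not part of the build): the exit decision made by
// __ArmVectorIrq() above, in pseudo-C. The mode constants are the ARM CPSR
// mode numbers the code tests (0x10 = usr, 0x13 = svc); RunEventHandlers
// stands in for the run_event_handlers label.
#if 0
void IrqPostambleSketch(TSubScheduler& aSS, TUint32 aInterruptedCpsr)
    {
    if (--aSS.i_IrqNestCount >= 0)
        return;                                     // nested IRQ: plain restore and RFE
    if (aSS.iEventHandlersPending)
        RunEventHandlers(&aSS);
    TUint32 mode = aInterruptedCpsr & 0x1f;
    TBool canResched = (mode==0x10 || mode==0x13)   // interrupted usr or svc...
                    && aSS.iKernLockCount==0        // ...with the kernel unlocked...
                    && aSS.iRescheduleNeededFlag;   // ...and work for the scheduler
    if (!canResched)
        return;                                     // restore registers and RFE
    aSS.iKernLockCount = 1;                         // lock the kernel, then reschedule;
    TScheduler::Reschedule();                       // this may switch threads before the
    }                                               // interrupted context finally resumes
#endif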


/******************************************************************************
* FIQ Postamble
* This routine is called after the FIQ has been dispatched
* spsr_fiq, r0-r3 are unmodified
* Return address is on the top of the FIQ stack -- except that if user memory
* guards are in use, the saved DACR was pushed afterwards, so that's on top
* of the stack and the return address is next
******************************************************************************/

extern "C" __NAKED__ void __ArmVectorFiq()
    {
#ifdef __FAULT_ON_FIQ__
    asm(".word 0xe7f10f10 ");
#endif
    // IRQs and FIQs disabled here
    // r0-r7 are unaltered from when FIQ occurred
    GET_RWNO_TID(,r9);
    asm("mrs r8, spsr ");                   // check interrupted mode
    asm("and r10, r8, #0x1f ");
    asm("cmp r10, #0x10 ");                 // check for mode_usr
    asm("ldr r11, [r9, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("cmpne r10, #0x13 ");               // or mode_svc
    asm("ldreq r10, [r9, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
    asm("cmpeq r11, #0 ");                  // and check if kernel locked
    asm("bne FiqExit0 ");                   // if wrong mode or kernel locked, return immediately
    asm("cmp r10, #0 ");                    // check if reschedule needed
    asm("beq FiqExit0 ");                   // if not, return from interrupt

    // we interrupted mode_usr or mode_svc, kernel unlocked, reschedule needed
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("ldr r8, [sp], #4 ");               // r8_fiq = UMG state
#endif
    asm("ldr r14, [sp], #4 ");              // r14_fiq = return address
    asm("add r11, r11, #1 ");
    asm("str r11, [r9, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));    // lock the kernel
    SRSDBW(MODE_SVC);                       // save return address and return CPSR to supervisor stack
    CPSCHM(MODE_SVC);                       // switch to mode_svc, all interrupts off
    asm("sub sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
    asm("stmia sp, {r0-r14}^ ");            // save R0-R12, R13_usr, R14_usr
    asm("mov r0, #%a0" : : "i" ((TInt)SThreadExcStack::EFiq));
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    CPSCHM(MODE_FIQ);                       // back to mode_fiq, all interrupts off
    asm("mov r1, r8 ");                     // retrieve UMG state
    CPSCHM(MODE_SVC);                       // switch to mode_svc, all interrupts off
    asm("mov r11, r1 ");                    // UMG state into R11
#endif
    asm("str r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));  // word describing exception type
    __ASM_STI();                            // interrupts on
    asm("b irq_do_resched ");               // do reschedule and return from interrupt

    asm("FiqExit0: ");
#ifdef BTRACE_CPU_USAGE
    asm("ldr r8, __BTraceCpuUsageFilter ");
    asm("ldrb r8, [r8] ");
    asm("cmp r8, #0 ");
    asm("beq 1f ");
    asm("stmfd sp!, {r0-r3} ");
    asm("bl btrace_fiq_exit ");
    asm("ldmfd sp!, {r0-r3} ");
    asm("1: ");
#endif
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("ldr r8, [sp], #4 ");
#endif
    USER_MEMORY_GUARD_RESTORE(r8,r12);
    asm("ldmfd sp!, {pc}^ ");               // return from interrupt

    asm("__TheScheduler: ");
    asm(".word TheScheduler ");
    }
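
// Illustrative sketch (not part of the build): the FIQ exit decision mirrors
// the IRQ postamble. Only when the FIQ interrupted usr/svc with the kernel
// unlocked and a reschedule pending does it build an SThreadExcStack frame on
// the mode_svc stack and join irq_do_resched; otherwise it returns straight
// from the FIQ stack. JoinIrqDoResched is a hypothetical stand-in for that
// branch.
#if 0
void FiqPostambleSketch(TSubScheduler& aSS, TUint32 aSpsrFiq)
    {
    TUint32 mode = aSpsrFiq & 0x1f;
    if ((mode==0x10 || mode==0x13) && aSS.iKernLockCount==0 && aSS.iRescheduleNeededFlag)
        {
        ++aSS.iKernLockCount;           // lock the kernel...
        JoinIrqDoResched();             // ...and reschedule via the common IRQ path
        }
    // else: pop the return address (and the saved DACR, if guards are on) and return
    }
#endif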


/******************************************************************************
* Abort handler
* This routine is called in response to a data abort, prefetch abort or
* undefined instruction exception.
******************************************************************************/

extern "C" __NAKED__ void __ArmVectorAbortData()
    {
    __ASM_CLI();                            // disable all interrupts
    asm("sub lr, lr, #8 ");                 // lr now points to aborted instruction
    SRSDBW(MODE_ABT);                       // save it along with aborted CPSR
    asm("sub sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
    asm("stmia sp, {r0-r14}^ ");            // save R0-R12, R13_usr, R14_usr
    GET_RWNO_TID(,r11);
    asm("mov r1, #%a0 " : : "i" ((TInt)EArmExceptionDataAbort));
    asm("str r1, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));  // word describing exception type

    asm("handle_exception: ");
    // We are in exception mode (abt/und) with registers stacked as follows:
    // R13_abt/R13_und -> R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13_usr R14_usr ExcCode PC CPSR
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
    CLREX                                   // reset exclusive monitor
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
    STREX(12,0,13);                         // dummy STREX to reset exclusivity monitor
#endif

#if 0   // minimum-dependency exception handling
    asm("ldr r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iCPSR));
    asm("mrs r4, cpsr ");
    asm("orr r1, r0, #0xc0 ");
    asm("msr cpsr, r1 ");                   // back to original mode
    asm("mov r2, sp ");
    asm("mov r3, lr ");
    asm("msr cpsr, r4 ");                   // back to mode_abt or mode_und
    asm("stmfd sp!, {r2,r3} ");             // now have R13 R14 R0-R12 R13_usr R14_usr ExcCode PC CPSR
    asm("mrc p15, 0, r1, c5, c0, 0 ");      // DFSR
    asm("mrc p15, 0, r2, c5, c0, 1 ");      // IFSR
    asm("mrc p15, 0, r0, c6, c0, 0 ");      // DFAR
    asm("stmfd sp!, {r0-r2} ");             // now have DFAR DFSR IFSR R13 R14 R0-R12 R13_usr R14_usr ExcCode PC CPSR
    asm("mov r0, sp ");
    asm(".extern hw_init_exc ");
    asm("bl hw_init_exc ");
    asm("add sp, sp, #20 ");
    asm("ldmia sp, {r0-r14}^ ");            // restore R0-R12, R13_usr, R14_usr
    asm("nop ");                            // don't access banked register immediately after
    asm("add sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
    RFEIAW(13);                             // restore PC and CPSR
#endif

    asm("ldr r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iCPSR));
    asm("mrs r12, cpsr ");
    asm("and r3, r0, #0x1f ");              // r3=processor mode when abort occurred
    asm("bic r12, r12, #0xc0 ");
    asm("cmp r3, #0x10 ");                  // aborted in user mode?
    asm("cmpne r3, #0x13 ");                // if not, aborted in mode_svc?
    asm("bne fatal_exception_mode ");       // if neither, fault
    asm("cmp r11, #0 ");
    asm("beq fatal_exception_mode ");       // if subscheduler not yet set up, fault
    asm("ldr r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
    __ASM_STI();                            // reenable interrupts - rescheduling disabled by mode_abt/mode_und
    asm("mov r10, sp ");                    // r10 points to saved registers
    asm("cmp r5, #0 ");                     // exception with kernel locked?
    asm("bne fatal_exception_mode ");       // if so, fault
    asm("add r5, r5, #1 ");                 // lock the kernel
    asm("str r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
    CPSCHM(MODE_SVC);                       // mode_svc, interrupts on, kernel locked

    asm("ldr r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
    asm("add r5, r5, #%a0" : : "i" _FOFF(NThread,iStackBase));
    asm("ldmia r5, {r2,r5} ");              // r2=supervisor stack area base, r5=size
    asm("subs r2, sp, r2 ");                // r2=amount of mode_svc stack remaining
    asm("blo fatal_exception_stack ");      // if stack pointer invalid, fault
    asm("cmp r2, r5 ");
    asm("bhi fatal_exception_stack ");
    asm("cmp r2, #128 ");                   // check enough stack to handle exception
    asm("blo fatal_exception_stack ");      // if not, fault

    // At this point we are in mode_svc with interrupts enabled and the kernel locked.
    // We know the supervisor stack is valid and has enough free space to store the exception info.
    // Registers: R0=aborted cpsr, R10 points to saved registers, R11->TSubScheduler
    // on mode_abt or mode_und stack, R12 holds mode of exception (mode_abt or mode_und).

    asm("add r1, r10, #%a0" : : "i" _FOFF(SThreadExcStack,iR8));
    asm("ldmia r1, {r0-r9} ");              // get saved R8,R9,R10,R11,R12,R13_usr,R14_usr,exccode,PC,CPSR
    __ASM_CLI();
    asm("mov r12, sp ");                    // save original R13_svc
    asm("bic sp, sp, #4 ");                 // align R13_svc to 8 byte boundary
    asm("stmfd sp!, {r0-r9} ");             // save on supervisor stack
    asm("ldmia r10, {r0-r6,r10} ");         // get saved R0-R7
    asm("stmfd sp!, {r0-r6,r10} ");         // save on supervisor stack
                                            // leave R7=exccode, R8=aborted instruction address, R9=aborted CPSR
    asm("cmp r7, #%a0 " : : "i" ((TInt)EArmExceptionUndefinedOpcode));
    asm("moveq r0, #0x1b ");                // mode_und
    asm("movne r0, #0x17 ");                // mode_abt
    asm("msr cpsr, r0 ");                   // mode_abt or mode_und, interrupts on
    asm("add sp, sp, #%a0 " : : "i" ((TInt)sizeof(SThreadExcStack)));   // restore exception stack balance
    CPSCHM(MODE_SVC);                       // back into mode_svc, interrupts on

    asm("ldr r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
    asm("cmp r7, #%a0 " : : "i" ((TInt)EArmExceptionPrefetchAbort));
    asm("mrceq p15, 0, r1, c5, c0, 1 ");    // r1=instruction fault status
    asm("mrcne p15, 0, r1, c5, c0, 0 ");    // r1=data fault status
#ifdef __CPU_ARM_HAS_CP15_IFAR
    asm("mrceq p15, 0, r0, c6, c0, 2 ");    // r0 = IFAR fault address
#else
    asm("moveq r0, r8 ");                   // else fault address for prefetch abort = instruction address
#endif // __CPU_ARM_HAS_CP15_IFAR
    asm("mrcne p15, 0, r0, c6, c0, 0 ");    // r0 = DFAR fault address
    asm("mrs r2, spsr ");                   // r2 = spsr_svc
    asm("mov r3, #0 ");                     // spare word
                                            // r12 = original R13_svc
    asm("ldr r5, [r4, #%a0]" : : "i" _FOFF(NThread,iHandlers));     // r5 -> SNThreadHandlers
    asm("stmfd sp!, {r0-r3,r12,r14} ");     // save FAR, FSR, SPSR_SVC, 0, R13_svc, R14_svc

    USER_MEMORY_GUARD_ON(,r6,r0);

    // Now we can unlock the kernel and process the exception
    asm("bl " CSM_ZN5NKern6UnlockEv );

    // R4 points to the current thread
    // Get the handler address
    asm("ldr r5, [r5, #%a0]" : : "i" _FOFF(SNThreadHandlers,iExceptionHandler));    // r5 -> exception handler

    // Kernel is now unlocked so we can retrieve the opcode for an undefined instruction trap.
    // We might take a page fault doing this but that's OK since the original instruction
    // fetch might have taken a page fault and we no longer hold any more locks than were
    // held at that time.
    asm("cmp r7, #%a0 " : : "i" ((TInt)EArmExceptionUndefinedOpcode));
    asm("beq exc_undef ");

    // call the exception dispatcher
    asm("exc_dispatch: ");
    asm("mov r1, r4 ");                     // pass address of current thread
    asm("mov r0, sp ");                     // pass address of TArmExcInfo
    asm("adr lr, exc_return ");
    __JUMP(, r5);                           // call exception handler

    // Undefined instruction - get the opcode
    // R4->current thread, R8=address of aborted instruction, R9=CPSR at time of abort, SP->TArmExcInfo
    asm("exc_undef: ");
    asm("tst r9, #0x20 ");                  // THUMB?
    asm("bne exc_undef_thumb ");            // branch if so
    asm("tst r9, #0x00800000 ");            // J=1 ?
    asm("bne exc_dispatch ");               // T=0, J=1 -> dispatch normally
    asm("tst r9, #0x0f ");                  // ARM - mode_usr ?
    asm("ldrne r0, [r8] ");                 // If not, get opcode
    USER_MEMORY_GUARD_OFF(eq,r0,r0);
    asm("ldreqt r0, [r8] ");                // else get opcode with user permissions
    USER_MEMORY_GUARD_ON(eq,r1,r1);
    asm("str r0, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iFaultStatus));  // save opcode

    // ARM opcode in R0 - check for coprocessor or special UNDEF opcode
    // Special undef *7F***F*
    asm("orr r1, r0, #0xF000000F ");        // *7F***F* -> F7F***FF
    asm("orr r1, r1, #0x000FF000 ");        // *7F***F* -> F7FFF*FF
    asm("orr r1, r1, #0x00000F00 ");        // *7F***F* -> F7FFFFFF
    asm("cmn r1, #0x08000001 ");            // check
    asm("moveq r1, #32 ");
    asm("beq special_undef_arm ");          // branch if special undef

    // Coprocessor *X***N**   X=C,D,E   N=coprocessor number
    // Advanced SIMD F2****** F3****** F4X***** (X even)
    asm("and r1, r0, #0x0F000000 ");        // *C****** -> 0C000000
    asm("add r1, r1, #0xF4000000 ");        // *C****** -> 00000000
    asm("cmp r1, #0x03000000 ");
    asm("movlo r1, r0, lsr #8 ");
    asm("andlo r1, r1, #0x0f ");            // r1 = coprocessor number
    asm("blo undef_coproc_arm ");
    asm("add r1, r0, #0x0E000000 ");        // F2****** -> 00******
    asm("cmp r1, #0x02000000 ");
    asm("blo undef_coproc_arm ");
    asm("cmp r1, #0x03000000 ");
    asm("bhs exc_dispatch ");               // if not coproc/AdvSIMD, dispatch normally
    asm("tst r0, #0x00100000 ");
    asm("bne exc_dispatch ");               // if not coproc/AdvSIMD, dispatch normally
    asm("mov r1, #16 ");                    // CP=16 for non-coprocessor AdvSIMD
    asm("b undef_coproc_arm ");

    asm("exc_undef_thumb: ");
    asm("tst r9, #0x0f ");                  // THUMB - mode_usr ?
    USER_MEMORY_GUARD_OFF(eq,r0,r0);
    asm("ldreqbt r0, [r8], #1 ");           // yes - get low 8 bits
    asm("ldreqbt r1, [r8], #1 ");           // get high 8 bits
    USER_MEMORY_GUARD_ON(eq,r2,r2);
    asm("ldrneh r0, [r8], #2 ");            // no - get first 16 bits of opcode
    asm("orreq r0, r0, r1, lsl #8 ");       // user mode - r0 = first 16 bits of opcode
#ifdef __CPU_THUMB2
    // must check for a 32 bit instruction and get second half if necessary
    asm("cmp r0, #0xe800 ");
    asm("blo exc_undef_thumb_16 ");         // skip if 16 bit
    asm("tst r9, #0x0f ");                  // mode_usr ?
    USER_MEMORY_GUARD_OFF(eq,r1,r1);
    asm("ldreqbt r1, [r8], #1 ");           // yes - get low 8 bits
    asm("ldreqbt r2, [r8], #1 ");           // get high 8 bits
    USER_MEMORY_GUARD_ON(eq,r3,r3);
    asm("ldrneh r1, [r8], #2 ");            // no - get second 16 bits of opcode
    asm("orreq r1, r1, r2, lsl #8 ");       // user mode - r1 = second 16 bits of opcode
    asm("orr r0, r1, r0, lsl #16 ");        // first half of opcode into top of R0
    asm("exc_undef_thumb_16: ");
#endif
    asm("str r0, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iFaultStatus));  // save opcode

    // THUMB opcode in R0 - check for coprocessor operation or special UNDEF opcode
    // Special undef DE**, F7F*A***
    asm("sub r1, r0, #0xde00 ");
    asm("cmp r1, #0x100 ");
    asm("movlo r1, #33 ");
    asm("blo special_undef_thumb ");        // branch if THUMB1 special undef
    asm("orr r1, r0, #0x000000FF ");        // F7F*A*** -> F7F*A*FF
    asm("orr r1, r1, #0x00000F00 ");        // F7F*A*** -> F7F*AFFF
    asm("orr r1, r1, #0x000F0000 ");        // F7F*A*** -> F7FFAFFF
    asm("add r1, r1, #0x00005000 ");        // F7F*A*** -> F7FFFFFF
    asm("cmn r1, #0x08000001 ");            // check
    asm("moveq r1, #34 ");
    asm("beq special_undef_thumb2 ");       // branch if THUMB2 special undef

    // Check for THUMB2 Coprocessor instruction
    // 111x 11yy xxxx xxxx | xxxx nnnn xxxx xxxx   nnnn=coprocessor number, yy=00,01,10
    // 111x 1111 xxxx xxxx | xxxx xxxx xxxx xxxx   Advanced SIMD
    // 1111 1001 xxx0 xxxx | xxxx xxxx xxxx xxxx   Advanced SIMD
    asm("orr r1, r0, #0x10000000 ");
    asm("cmn r1, #0x01000000 ");
    asm("movcs r1, #16 ");                  // CP=16 for non-coprocessor AdvSIMD
    asm("bcs undef_coproc_thumb ");
    asm("cmp r1, #0xFC000000 ");
    asm("movcs r1, r0, lsr #8 ");
    asm("andcs r1, r1, #0x0f ");            // r1 = coprocessor number
    asm("bcs undef_coproc_thumb ");
    asm("and r1, r0, #0xFF000000 ");
    asm("cmp r1, #0xF9000000 ");
    asm("tsteq r0, #0x00100000 ");
    asm("bne exc_dispatch ");               // if not coproc/AdvSIMD, dispatch normally
    asm("mov r1, #16 ");                    // CP=16 for non-coprocessor AdvSIMD

    asm("special_undef_arm: ");
    asm("special_undef_thumb: ");
    asm("special_undef_thumb2: ");
    asm("undef_coproc_thumb: ");
    asm("undef_coproc_arm: ");
    asm("mov r0, sp ");
    asm("bl " CSM_CFUNC(HandleSpecialOpcode));
    asm("cmp r0, #0 ");
    asm("beq exc_dispatch ");               // if not handled, dispatch normally, else return

    // return from exception
    // R4 points to current thread, R11->TSubScheduler, SP->TArmExcInfo
    asm("exc_return: ");
    __ASM_CLI();
    asm("ldr r0, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iCpsr));
    asm("ldr r1, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
    asm("mov r9, r4 ");
    asm("tst r0, #0x0f ");                  // returning to user mode?
    asm("bne exc_return2 ");                // if not, we don't check locks or run callbacks

#ifdef __CHECK_LOCK_STATE__
    asm("bl " CSM_CFUNC(check_lock_state));
#endif
    asm("cmp r1, #3 ");                     // callbacks?
    asm("blo exc_return2 ");
    asm("stmfd sp!, {r6} ");
    asm("bl run_user_mode_callbacks ");     // run them; NB trashes most registers (R0-R12, R14)
    asm("ldmfd sp!, {r6} ");

    asm("exc_return2: ");
    RECORD_STATE_EXC();
    USER_MEMORY_GUARD_RESTORE(r6,r0);

    asm("add r7, sp, #%a0" : : "i" _FOFF(TArmExcInfo,iSpsrSvc));    // r7->saved spsr_svc
    asm("ldmia r7!, {r0-r2,r14} ");         // r0=original spsr_svc, r2=original sp_svc, restore lr_svc
    asm("add r6, sp, #%a0" : : "i" _FOFF(TArmExcInfo,iR15));        // r6->saved PC, CPSR
    asm("msr spsr, r0 ");                   // restore spsr_svc
    asm("ldmia r6, {r0,r1} ");
    asm("stmdb r2!, {r0,r1} ");             // move saved PC, CPSR so sp_svc ends up at original place
    asm("str r2, [r6, #-4] ");              // overwrite iExcCode with original sp_svc - 8
    asm("ldmia r7, {r0-r14}^ ");            // restore R0-R12, R13_usr, R14_usr
    asm("nop ");                            // don't touch banked register immediately afterwards
    asm("ldr sp, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iExcCode));  // R13_svc = original R13_svc - 8
    USER_MEMORY_GUARD_CHECK();              // check UMG is off if returning to user mode
    RFEIAW(13);                             // restore PC and CPSR - return from exception

    // get here if exception occurred in mode other than usr or svc
    // we are in mode_abt or mode_und with IRQs disabled
    // R0=original CPSR R10->saved registers on exception stack R11->TSubScheduler
    // R12=processor mode of exception (abt/und)
    asm("fatal_exception_mode: ");
    asm("ldr r2, __TheScheduler ");
    asm("ldr lr, [r2, #%a0]" : : "i" _FOFF(TScheduler,iMonitorExceptionHandler));
    asm("cmp lr, #0 ");
    __JUMP(ne, lr);                         // if crash debugger running, let it handle exception

    // get here if mode_svc stack has overflowed
    // we are in mode_svc with interrupts enabled and the kernel locked
    // R0=original CPSR R10->saved registers on exception stack R11->TSubScheduler
    // R12=processor mode of exception (abt/und)
    asm("fatal_exception_stack: ");
    asm("orr r3, r12, #0xC0 ");
    asm("msr cpsr, r3 ");                   // back to exception mode, all interrupts off
    asm("mov r2, r0 ");
    asm("cmp r11, #0 ");
    asm("ldreq r11, __SS0 ");
    asm("ldr r0, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));     // pass in address of stored registers
    asm("cmp r0, #0 ");
    asm("ldreq r0, __DefaultRegs ");
    asm("bl " CSM_ZN3Arm9SaveStateER14SFullArmRegSet );
    asm("ldmia sp!, {r4-r9} ");             // get original R0-R5
    asm("stmia r0!, {r4-r9} ");             // save original R0-R5
    asm("ldmia sp!, {r4-r9} ");             // get original R6-R11
    asm("stmia r0!, {r4-r9} ");             // save original R6-R11
    asm("ldmia sp!, {r4-r9} ");             // get original R12 R13_usr R14_usr iExcCode PC CPSR
    asm("stmia r0!, {r4-r6} ");             // save original R12 R13_usr R14_usr
    asm("sub r0, r0, #60 ");                // R0 back to where it was (6+6+3 = 15 words saved)
    asm("str r7, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iExcCode));
    asm("str r8, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iR15));
    asm("str r9, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iFlags));
    asm("mov r1, #13 ");                    // r1 = regnum
    asm("mrs r2, cpsr ");                   // r2 = mode
    asm("mov r4, r0 ");
    asm("bl " CSM_ZN3Arm3RegER14SFullArmRegSetim );     // r0 = pointer to exception mode R13
    asm("str sp, [r0] ");                   // save correct original value for exception mode R13

    // call the exception fault dispatcher
    asm("mov r0, #0 ");
    asm("b ExcFault ");

    asm("__SS0: ");
    asm(".word %a0" : : "i" ((TInt)&TheSubSchedulers[0]));
    asm("__DefaultRegs: ");
    asm(".word %a0" : : "i" ((TInt)&DefaultRegSet));
    }
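
// Illustrative sketch (not part of the build): the ARM-encoding classification
// performed in exc_undef above. The orr/cmn trick folds the wildcard nibbles
// of the special-undef pattern *7F***F* to ones and then tests for 0xF7FFFFFF
// ("cmn r1, #0x08000001" sets Z exactly when r1 == ~0x08000000). IsAdvSimd is
// a hypothetical helper simplifying the F2/F3/F4x encoding checks.
#if 0
TInt ClassifyArmUndefSketch(TUint32 aOpcode)
    {
    if ((aOpcode | 0xF00FFF0F) == 0xF7FFFFFF)
        return 32;                          // special UNDEF marker (*7F***F*)
    TUint32 top = aOpcode & 0x0F000000;
    if (top==0x0C000000 || top==0x0D000000 || top==0x0E000000)
        return (aOpcode >> 8) & 0x0f;       // coprocessor number from bits 8-11
    if (IsAdvSimd(aOpcode))
        return 16;                          // CP=16 for non-coprocessor AdvSIMD
    return -1;                              // not special: dispatch normally
    }
#endif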

extern "C" __NAKED__ void __ArmVectorAbortPrefetch()
    {
    __ASM_CLI();                            // disable all interrupts
    asm("sub lr, lr, #4");                  // lr now points to instruction whose prefetch was aborted
    SRSDBW(MODE_ABT);                       // save it along with aborted CPSR
    asm("sub sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
    asm("stmia sp, {r0-r14}^ ");            // save R0-R12, R13_usr, R14_usr
    GET_RWNO_TID(,r11);
    asm("mov r1, #%a0 " : : "i" ((TInt)EArmExceptionPrefetchAbort));
    asm("str r1, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));  // word describing exception type
    asm("b handle_exception ");
    }

extern "C" __NAKED__ void __ArmVectorUndef()
    {
    __ASM_CLI();                            // disable all interrupts
    asm("sub lr, lr, #4");                  // lr now points to undefined instruction
    SRSDBW(MODE_UND);                       // save it along with aborted CPSR
    asm("sub sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
    asm("stmia sp, {r0-r14}^ ");            // save R0-R12, R13_usr, R14_usr
    GET_RWNO_TID(,r11);
    asm("mov r1, #%a0 " : : "i" ((TInt)EArmExceptionUndefinedOpcode));
    asm("str r1, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));  // word describing exception type
    asm("mrs r0, spsr ");                   // r0=CPSR at time of exception
    asm("tst r0, #0x20 ");                  // exception in THUMB mode?
    asm("addne lr, lr, #2 ");               // if so, correct saved return address
    asm("strne lr, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iR15));
    asm("b handle_exception ");
    }

/******************************************************************************
* Kick other CPUs as necessary to process TGenericIPI
******************************************************************************/
extern "C" __NAKED__ void send_generic_ipis(TUint32 /*aMask*/)
    {
    asm("movs r0, r0, lsl #16 ");           // CPU mask into bits 16-23 - any bits set in aMask?
    GET_RWNO_TID(ne,r3);
    asm("ldrne r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));    // we assume i_GicDistAddr is the same for all CPUs
    __DATA_SYNC_BARRIER_Z__(r1);            // need DSB before sending any IPI
    asm("orrne r0, r0, #%a0" : : "i" ((TInt)GENERIC_IPI_VECTOR));
    asm("strne r0, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));        // trigger IPIs if any
    __JUMP(,lr);
    }
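
// Illustrative sketch (not part of the build): the GIC software-generated
// interrupt write performed above. The distributor's SGI register takes the
// target CPU mask in bits 16-23 and the interrupt ID in the low bits, which
// is why aMask is shifted left by 16 before being ORed with
// GENERIC_IPI_VECTOR. GicSoftIrqReg() is a hypothetical accessor for the
// GicDistributor::iSoftIrq field used by the real code.
#if 0
void SendGenericIpisSketch(TUint32 aMask)
    {
    if (!aMask)
        return;                             // no CPUs to kick
    asm volatile("dsb");                    // make prior memory writes visible before the IPI
    volatile TUint32* sgir = GicSoftIrqReg();
    *sgir = (aMask << 16) | GENERIC_IPI_VECTOR;     // raise the IPI on every CPU in the mask
    }
#endif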

/******************************************************************************
* Handle a crash IPI
* Enter in mode_sys or mode_fiq
* If in mode_sys, R7 = nest count, in which case:
*    If R7>0 nested IRQ so mode_sys stack contains R0...R12 R14sys PC CPSR
*    If R7=0 first IRQ, R5 points to top of mode_svc stack, which contains
*    R0...R12 R13usr R14usr iExcCode PC CPSR
* If in mode_fiq, FIQ stack contains R0...R7 R8usr...R14usr iExcCode PC CPSR
******************************************************************************/
extern "C" __NAKED__ void handle_crash_ipi()
    {
    GET_RWNO_TID(,r0);
    asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));
    asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iExcCode));
    asm("cmp r0, #0 ");
    asm("bge state_already_saved ");        // skip if this CPU has already saved its state (i.e. already crashed)
    GET_RWNO_TID(,r0);
    asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));
    asm("bl " CSM_ZN3Arm9SaveStateER14SFullArmRegSet );     // save machine state (NOTE: R0 trashed)
    asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iFlags));     // mode on entry
    asm("and r1, r1, #0x1f ");
    asm("cmp r1, #0x11 ");                  // mode_fiq?
    asm("ldreq r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13Fiq));  // yes - take registers from FIQ stack
    asm("beq 1f ");
    asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR7));        // nest count
    asm("cmp r1, #0 ");                     // nested?
    asm("ldreq r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR5));      // no - take registers from SVC stack (R5 points to it)
    asm("beq 2f ");
    asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13));       // nested - take R0...R12 R14usr PC CPSR from mode_sys stack
    asm("ldmia r1!, {r2-r11} ");
    asm("stmia r0!, {r2-r11} ");            // save original R0-R9
    asm("ldmia r1!, {r2-r7} ");             // R2=original R10, R3=orig R11, R4=orig R12 R5=orig R14usr R6=orig PC R7=orig CPSR
    asm("stmia r0!, {r2-r4} ");             // save original R10-R12
    asm("stmia r0!, {r1,r5,r6,r7} ");       // save original R13usr, R14usr, PC, CPSR
    asm("sub r0, r0, #68 ");                // R0 back to i_Regs
    asm("mov r4, r0 ");
    asm("b 0f ");

    asm("1: ");                             // R1 points to R0...R12 R13usr R14usr iExcCode PC CPSR
    asm("ldmia r1!, {r2-r11} ");
    asm("stmia r0!, {r2-r11} ");            // save original R0-R9
    asm("ldmia r1!, {r2-r9} ");             // R2=original R10, R3=orig R11, R4=orig R12 R5=orig R13usr R6=orig R14usr R8=orig PC R9=orig CPSR
    asm("stmia r0!, {r2-r6,r8,r9} ");       // save original R10-R12 R13usr R14usr PC CPSR
    asm("sub r0, r0, #68 ");                // R0 back to i_Regs
    asm("mov r4, r0 ");
    asm("str r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13Fiq));    // save original R13Fiq
    asm("b 0f ");

    asm("2: ");                             // R1 points to R0...R12 R13usr R14usr iExcCode PC CPSR
    asm("ldmia r1!, {r2-r11} ");
    asm("stmia r0!, {r2-r11} ");            // save original R0-R9
    asm("ldmia r1!, {r2-r9} ");             // R2=original R10, R3=orig R11, R4=orig R12 R5=orig R13usr R6=orig R14usr R8=orig PC R9=orig CPSR
    asm("stmia r0!, {r2-r6,r8,r9} ");       // save original R10-R12 R13usr R14usr PC CPSR
    asm("sub r0, r0, #68 ");                // R0 back to i_Regs
    asm("mov r4, r0 ");
    asm("str r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13Svc));    // save original R13Svc

    asm("0: ");
    asm("state_already_saved: ");
    __DATA_SYNC_BARRIER_Z__(r6);

    USER_MEMORY_GUARD_OFF(,r0,r0);
    asm("mov r0, #0 ");
    asm("mov r1, #0 ");
    asm("mov r2, #0 ");
    asm("bl NKCrashHandler ");              // call NKCrashHandler(0,0,0)

    __DATA_SYNC_BARRIER__(r6);
    GET_RWNO_TID(,r0);
    asm("ldr r7, __CrashStateOut ");
    asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
    asm("7: ");
    LDREX(1,7);
    asm("bic r1, r1, r2 ");
    STREX(3,1,7);                           // atomic { CrashStateOut &= ~iCpuMask; }
    asm("cmp r3, #0 ");
    asm("bne 7b ");
    asm("1: ");
    ARM_WFE;
    asm("b 1b ");                           // all done, just wait to be reset

    asm("__CrashStateOut: ");
    asm(".word CrashStateOut ");
    }
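
// Illustrative sketch (not part of the build): the LDREX/STREX loop that
// clears this CPU's bit in CrashStateOut above, written with GCC-style
// atomics for clarity; the real code uses raw exclusives and ARM_WFE.
#if 0
void AckCrashIpiSketch(TUint32 aCpuMask)
    {
    // atomic { CrashStateOut &= ~aCpuMask; } - retried until the exclusive store succeeds
    __atomic_and_fetch(&CrashStateOut, ~aCpuMask, __ATOMIC_SEQ_CST);
    for (;;)
        asm volatile("wfe");                // nothing left to do: wait to be reset
    }
#endif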


/******************************************************************************
* Run TUserModeCallbacks when a thread is about to return to user mode
*
* On entry:
*    CPU in mode_svc, interrupts disabled, kernel unlocked, thread not in CS
*    R9 points to current NThread
*    We know there is at least one callback on the list
*    Stack not necessarily 8 byte aligned
*    User memory guards on (if in use)
* On return:
*    CPU in mode_svc, interrupts disabled, kernel unlocked, thread not in CS
*    No TUserModeCallbacks outstanding at the point where interrupts were disabled.
*    R0-R12,R14 modified
******************************************************************************/
extern "C" __NAKED__ void DoRunUserModeCallbacks()
    {
    asm(".global run_user_mode_callbacks ");
    asm("run_user_mode_callbacks: ");

    USER_MEMORY_GUARD_ASSERT_ON(r12);

#ifdef __CHECK_LOCK_STATE__
    asm("ldr r0, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsCount));
    asm("cmp r0, #0 ");
    asm("beq 0f ");
    __ASM_CRASH();
#endif

    asm("0: ");
    __ASM_STI();                            // enable interrupts
    asm("mov r10, sp ");                    // save stack pointer
    asm("mov r11, lr ");                    // save return address
    asm("add r8, r9, #%a0" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
    asm("mov r0, #1 ");                     // shouldn't have been in CS to begin with
    asm("bic sp, sp, #4 ");                 // align stack to 8 byte boundary
    asm("str r0, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsCount));      // EnterCS()

    asm("1: ");
    LDREX(7,8);                             // r7 = iUserModeCallbacks
    asm("mov r6, #0 ");
    STREX(12,6,8);                          // iUserModeCallbacks = 0 if not changed
    asm("cmp r12, #0 ");
    asm("bne 1b ");
    __DATA_MEMORY_BARRIER__(r6);

    asm("2: ");
    asm("movs r0, r7 ");                    // r0 = pointer to callback
    asm("beq 3f ");                         // branch out if reached end of list
    asm("ldmia r7, {r7, r12} ");            // r7 = callback->iNext, r12 = callback->iFunc
    asm("mov r1, #%a0" : : "i" ((TInt)KUserModeCallbackUnqueued));
    asm("str r1, [r0, #0] ");               // callback->iNext = KUserModeCallbackUnqueued
    __DATA_MEMORY_BARRIER__(r6);
    asm("adr lr, 2b ");                     // return to beginning of loop
    asm("mov r1, #%a0" : : "i" ((TInt)EUserModeCallbackRun));
    __JUMP(, r12);                          // (*callback->iFunc)(callback, EUserModeCallbackRun);

    asm("3: ");
    __ASM_CLI();                            // turn off interrupts
    __DATA_MEMORY_BARRIER__(r6);
    asm("ldr r0, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
    asm("ldr r1, [r8] ");
    asm("cmp r0, #0 ");                     // anything to do in LeaveCS() ?
    asm("bne 5f ");                         // if yes, jump to slow path
    asm("cmp r1, #0 ");                     // no - any more callbacks?
    asm("bne 4f ");

    // no more callbacks, no CsFunction so just LeaveCS() and return
    asm("str r6, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsCount));
    asm("mov sp, r10 ");                    // restore stack pointer
    __JUMP(, r11);

    // more callbacks have been queued so loop round and do them
    asm("4: ");
    __ASM_STI();                            // enable interrupts
    asm("b 1b ");

    // CsFunction outstanding so do it
    asm("5: ");
    __ASM_STI();                            // enable interrupts
    asm("bl ThreadLeaveCS__5NKern ");
    __ASM_CLI();                            // turn off interrupts
    __DATA_MEMORY_BARRIER__(r6);
    asm("ldr r1, [r8] ");
    asm("mov sp, r10 ");
    asm("mov lr, r11 ");
    asm("cmp r1, #0 ");                     // any more callbacks queued?
    asm("bne 0b ");                         // yes - go right back to the beginning and do them
    __JUMP(, r11);                          // else return
    }
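
// Illustrative sketch (not part of the build): the lock-free drain performed
// by run_user_mode_callbacks above. The list head is atomically swapped with
// NULL (the LDREX/STREX loop), then each callback is marked unqueued and run.
// TUserModeCallback, KUserModeCallbackUnqueued and EUserModeCallbackRun are
// the kernel names used above; the atomic wrapper and field access are
// illustrative assumptions.
#if 0
void RunUserModeCallbacksSketch(NThreadBase* aThread)
    {
    aThread->iCsCount = 1;                  // EnterCS(): protect against thread exit while running
    do  {
        TUserModeCallback* head = (TUserModeCallback*)__atomic_exchange_n(
            &aThread->iUserModeCallbacks, (TUserModeCallback*)0, __ATOMIC_ACQUIRE);
        while (head)
            {
            TUserModeCallback* cb = head;
            head = cb->iNext;
            cb->iNext = KUserModeCallbackUnqueued;  // mark it free for requeueing
            cb->iFunc(cb, EUserModeCallbackRun);    // run the callback
            }
        } while (aThread->iUserModeCallbacks);      // callbacks may be queued while we run
    NKern::ThreadLeaveCS();
    }
#endif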