// Provenance (from repository metadata):
//   author:    William Roberts <williamr@symbian.org>
//   date:      Mon, 28 Jun 2010 11:25:30 +0100
//   branch:    GCC_SURGE
//   changeset: 184 (0e2270015475), parent 31 (56f325a607ea)
// Copyright (c) 1996-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\kernel\arm\cexec.cia
//
//

#include <e32cia.h>
#include <arm.h>
#include <kernel/cache.h>

#include "nk_priv.h"

GLREF_C TInt CalcKernelHeapUsed();
GLREF_C void InvalidFastExec();

void GetLatencyValues(TInt aMode, TInt& aCount, TInt* aDest);
void KernMsgTest();
void InvalidExecHandler();
void PreprocessHandler();

#define __GEN_KERNEL_EXEC_CODE__

#include "execs.h"
36 |
/***********************************************************************************
 * User-side executive handlers
 ***********************************************************************************/

#ifdef __ATOMIC64_USE_FAST_EXEC__
|
41 |
__NAKED__ void ExecHandler::FastAtomicAxo64(SAtomicOpInfo64*) |
|
42 |
{ |
|
43 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
44 |
asm("ldrt r12, [r0], #8 "); // r12 = a |
|
45 |
asm("stmfd sp!, {r4-r7} "); |
|
46 |
asm("ldrt r4, [r0], #4 "); // r5:r4 = u |
|
47 |
asm("ldrt r5, [r0], #4 "); |
|
48 |
asm("ldrt r2, [r12], #4 "); // r3:r2 = *a = oldv |
|
49 |
asm("ldrt r3, [r12], #-4 "); |
|
50 |
asm("ldrt r6, [r0], #4 "); // r7:r6 = v |
|
51 |
asm("ldrt r7, [r0], #-20 "); |
|
52 |
asm("strt r2, [r0], #4 "); // return oldv |
|
53 |
asm("strt r3, [r0], #4 "); |
|
54 |
asm("and r2, r2, r4 "); |
|
55 |
asm("and r3, r3, r5 "); // r3:r2 = oldv & u |
|
56 |
asm("eor r2, r2, r6 "); |
|
57 |
asm("eor r3, r3, r7 "); // r3:r2 = (oldv & u) ^ v |
|
58 |
asm("strt r2, [r12], #4 "); // write back to *a |
|
59 |
asm("strt r3, [r12], #-4 "); |
|
60 |
asm("ldmfd sp!, {r4-r7} "); |
|
61 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
62 |
__JUMP(,lr); |
|
63 |
} |
|
64 |
||
65 |
__NAKED__ TBool ExecHandler::FastAtomicCas64(SAtomicOpInfo64*) |
|
66 |
{ |
|
67 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
68 |
asm("ldrt r12, [r0], #4 "); // r12 = a |
|
69 |
asm("ldrt r1, [r0], #4 "); // r1 = q |
|
70 |
asm("stmfd sp!, {r4-r5} "); |
|
71 |
asm("ldrt r2, [r12], #4 "); // r3:r2 = *a |
|
72 |
asm("ldrt r3, [r12], #-4 "); |
|
73 |
asm("ldrt r4, [r1], #4 "); // r5:r4 = *q |
|
74 |
asm("ldrt r5, [r1], #-4 "); |
|
75 |
asm("cmp r2, r4 "); |
|
76 |
asm("cmpeq r3, r5 "); |
|
77 |
asm("ldreqt r2, [r0], #4 "); // if equal r3:r2 = v |
|
78 |
asm("ldreqt r3, [r0], #4 "); |
|
79 |
asm("strnet r2, [r1], #4 "); // if not equal *q = *a |
|
80 |
asm("strnet r3, [r1], #-4 "); |
|
81 |
asm("streqt r2, [r12], #4 "); // if equal *a = v |
|
82 |
asm("streqt r3, [r12], #-4 "); |
|
83 |
asm("ldmfd sp!, {r4-r5} "); |
|
84 |
asm("movne r0, #0 "); |
|
85 |
asm("moveq r0, #1 "); |
|
86 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
87 |
__JUMP(,lr); |
|
88 |
} |
|
89 |
||
90 |
__NAKED__ void ExecHandler::FastAtomicAdd64(SAtomicOpInfo64*) |
|
91 |
{ |
|
92 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
93 |
asm("ldrt r12, [r0], #8 "); // r12 = a |
|
94 |
asm("stmfd sp!, {r4-r5} "); |
|
95 |
asm("ldrt r2, [r12], #4 "); // r3:r2 = *a = oldv |
|
96 |
asm("ldrt r3, [r12], #-4 "); |
|
97 |
asm("ldrt r4, [r0], #4 "); // r5:r4 = v |
|
98 |
asm("ldrt r5, [r0], #-12 "); |
|
99 |
asm("strt r2, [r0], #4 "); // return oldv |
|
100 |
asm("strt r3, [r0], #-4 "); |
|
101 |
asm("adds r2, r2, r4 "); // r3:r2 = oldv + v |
|
102 |
asm("adcs r3, r3, r5 "); |
|
103 |
asm("strt r2, [r12], #4 "); // write back to *a |
|
104 |
asm("strt r3, [r12], #-4 "); |
|
105 |
asm("ldmfd sp!, {r4-r5} "); |
|
106 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
107 |
__JUMP(,lr); |
|
108 |
} |
|
109 |
||
110 |
__NAKED__ void ExecHandler::FastAtomicTau64(SAtomicOpInfo64*) |
|
111 |
{ |
|
112 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
113 |
asm("ldrt r12, [r0] "); // r12 = a |
|
114 |
asm("stmfd sp!, {r4-r5} "); |
|
115 |
asm("ldrt r2, [r12], #4 "); // r3:r2 = *a = oldv |
|
116 |
asm("ldrt r3, [r12], #-4 "); |
|
117 |
asm("strt r2, [r0], #4 "); // return oldv |
|
118 |
asm("strt r3, [r0], #4 "); |
|
119 |
asm("ldrt r4, [r0], #4 "); // r5:r4 = t |
|
120 |
asm("ldrt r5, [r0], #4 "); |
|
121 |
asm("cmp r2, r4 "); // oldv - t |
|
122 |
asm("sbcs r1, r3, r5 "); |
|
123 |
asm("addcc r0, r0, #8 "); // if oldv<t r0->v else r0->u |
|
124 |
asm("ldrt r4, [r0], #4 "); // r5:r4 = u or v |
|
125 |
asm("ldrt r5, [r0], #4 "); |
|
126 |
asm("adds r2, r2, r4 "); // r3:r2 = oldv + u or v |
|
127 |
asm("adcs r3, r3, r5 "); |
|
128 |
asm("strt r2, [r12], #4 "); // write back to *a |
|
129 |
asm("strt r3, [r12], #-4 "); |
|
130 |
asm("ldmfd sp!, {r4-r5} "); |
|
131 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
132 |
__JUMP(,lr); |
|
133 |
} |
|
134 |
||
135 |
__NAKED__ void ExecHandler::FastAtomicTas64(SAtomicOpInfo64*) |
|
136 |
{ |
|
137 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
138 |
asm("ldrt r12, [r0] "); // r12 = a |
|
139 |
asm("stmfd sp!, {r4-r5} "); |
|
140 |
asm("ldrt r2, [r12], #4 "); // r3:r2 = *a = oldv |
|
141 |
asm("ldrt r3, [r12], #-4 "); |
|
142 |
asm("strt r2, [r0], #4 "); // return oldv |
|
143 |
asm("strt r3, [r0], #4 "); |
|
144 |
asm("ldrt r4, [r0], #4 "); // r5:r4 = t |
|
145 |
asm("ldrt r5, [r0], #4 "); |
|
146 |
asm("cmp r2, r4 "); // oldv - t |
|
147 |
asm("sbcs r1, r3, r5 "); |
|
148 |
asm("addlt r0, r0, #8 "); // if oldv<t r0->v else r0->u |
|
149 |
asm("ldrt r4, [r0], #4 "); // r5:r4 = u or v |
|
150 |
asm("ldrt r5, [r0], #4 "); |
|
151 |
asm("adds r2, r2, r4 "); // r3:r2 = oldv + u or v |
|
152 |
asm("adcs r3, r3, r5 "); |
|
153 |
asm("strt r2, [r12], #4 "); // write back to *a |
|
154 |
asm("strt r3, [r12], #-4 "); |
|
155 |
asm("ldmfd sp!, {r4-r5} "); |
|
156 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
157 |
__JUMP(,lr); |
|
158 |
} |
|
159 |
#endif |
|
160 |
||
161 |
#ifdef __ATOMIC64_USE_SLOW_EXEC__ |
|
162 |
#ifndef __CPU_ARM_HAS_CPS |
|
163 |
#error Only need slow exec atomics on ARMv6 |
|
164 |
#endif |
|
165 |
||
166 |
||
167 |
// Attempt to load and store the lsb of data. If interrupts are re-enabled |
|
168 |
// during the ldr or str then a page fault occurred so retry. |
|
169 |
// The exception handler prevents the strt instruction writing old data to |
|
170 |
// memory by skipping that instruction if a page fault occurs. |
|
171 |
// Note only need to ldr and str one word as the values will be 8 byte |
|
172 |
// aligned and therefore won't span page boundaries. |
|
173 |
#define ENSURE_PAGED_IN_64(rAddr, rLsbs, rMsbs, rCpsr, tag) \ |
|
174 |
asm("retry_atomic64_"#tag": "); \ |
|
175 |
CPSIDAIF; \ |
|
176 |
asm(".global magic_atomic64_ldrt_"#tag" "); \ |
|
177 |
asm("magic_atomic64_ldrt_"#tag": "); \ |
|
178 |
asm("ldrt r"#rLsbs", [r"#rAddr"]"); \ |
|
179 |
asm("mrs r"#rCpsr", cpsr "); \ |
|
180 |
asm("and r"#rCpsr", r"#rCpsr", #%a0 " : : "i" ((TInt)KAllInterruptsMask)); \ |
|
181 |
asm("cmp r"#rCpsr", #%a0 " : : "i" ((TInt)KAllInterruptsMask)); \ |
|
182 |
asm("bne retry_atomic64_"#tag" "); \ |
|
183 |
asm(".global magic_atomic64_strt_"#tag" "); \ |
|
184 |
asm("magic_atomic64_strt_"#tag": "); \ |
|
185 |
asm("strt r"#rLsbs", [r"#rAddr"], #4 "); \ |
|
186 |
asm("mrs r"#rCpsr", cpsr "); \ |
|
187 |
asm("and r"#rCpsr", r"#rCpsr", #%a0 " : : "i" ((TInt)KAllInterruptsMask)); \ |
|
188 |
asm("cmp r"#rCpsr", #%a0 " : : "i" ((TInt)KAllInterruptsMask)); \ |
|
189 |
asm("bne retry_atomic64_"#tag" "); \ |
|
190 |
asm("ldrt r"#rMsbs", [r"#rAddr"], #-4") |
|
191 |
||
192 |
||
193 |
__NAKED__ void ExecHandler::SlowAtomicAxo64(SAtomicOpInfo64*) |
|
194 |
{ |
|
195 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
196 |
asm("stmfd sp!, {r4-r8} "); |
|
197 |
asm("ldrt r12, [r0], #8 "); // r12 = a |
|
198 |
asm("ldrt r4, [r0], #4 "); // r5:r4 = u |
|
199 |
asm("ldrt r5, [r0], #4 "); |
|
200 |
asm("ldrt r6, [r0], #4 "); // r7:r6 = v |
|
201 |
asm("ldrt r7, [r0], #-20 "); |
|
202 |
||
203 |
// Disable interrupts and ensure the 64-bit data is paged in with write permissions. |
|
204 |
ENSURE_PAGED_IN_64(12,2,3,8,axo); |
|
205 |
// Data paged in so perform the operation. |
|
206 |
asm("and r4, r2, r4 "); |
|
207 |
asm("and r5, r3, r5 "); // r5:r4 = oldv & u |
|
208 |
asm("eor r4, r4, r6 "); |
|
209 |
asm("eor r5, r5, r7 "); // r5:r4 = (oldv & u) ^ v |
|
210 |
asm("strt r4, [r12], #4 "); // write back to *a |
|
211 |
asm("strt r5, [r12], #-4 "); |
|
212 |
CPSIEIF; |
|
213 |
asm("strt r2, [r0], #4 "); // return oldv |
|
214 |
asm("strt r3, [r0], #-4 "); |
|
215 |
asm("ldmfd sp!, {r4-r8} "); |
|
216 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
217 |
__JUMP(,lr); |
|
218 |
} |
|
219 |
||
220 |
__NAKED__ TBool ExecHandler::SlowAtomicCas64(SAtomicOpInfo64*) |
|
221 |
{ |
|
222 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
223 |
asm("stmfd sp!, {r4-r8} "); |
|
224 |
asm("ldrt r12, [r0], #4 "); // r12 = a |
|
225 |
asm("ldrt r1, [r0], #4 "); // r1 = q |
|
226 |
asm("ldrt r4, [r0], #4 "); // r5:r4 = v |
|
227 |
asm("ldrt r5, [r0], #-12 "); |
|
228 |
asm("ldrt r6, [r1], #4 "); // r7:r6 = *q |
|
229 |
asm("ldrt r7, [r1], #-4 "); |
|
230 |
||
231 |
// Disable interrupts and ensure the 64-bit data is paged in with write permissions. |
|
232 |
ENSURE_PAGED_IN_64(12,2,3,8,cas); |
|
233 |
// Data paged in so perform the operation. |
|
234 |
asm("cmp r2, r6 "); |
|
235 |
asm("cmpeq r3, r7 "); |
|
236 |
asm("streqt r4, [r12], #4 "); // if oldv==*q, *a=v |
|
237 |
asm("streqt r5, [r12], #-4 "); |
|
238 |
CPSIEIF; |
|
239 |
asm("strnet r2, [r1], #4 "); // if oldv!=*q, *q=oldv |
|
240 |
asm("strnet r3, [r1], #-4 "); |
|
241 |
asm("ldmfd sp!, {r4-r8} "); |
|
242 |
asm("movne r0, #0 "); // return 0 if oldv!=*q |
|
243 |
asm("moveq r0, #1 "); // return 1 if oldv==*q |
|
244 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
245 |
__JUMP(,lr); |
|
246 |
} |
|
247 |
||
248 |
__NAKED__ void ExecHandler::SlowAtomicAdd64(SAtomicOpInfo64*) |
|
249 |
{ |
|
250 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
251 |
asm("stmfd sp!, {r4-r6} "); |
|
252 |
asm("ldrt r12, [r0], #8 "); // r12 = a |
|
253 |
asm("ldrt r4, [r0], #4 "); // r5:r4 = v |
|
254 |
asm("ldrt r5, [r0], #-12 "); |
|
255 |
||
256 |
// Disable interrupts and ensure the 64-bit data is paged in with write permissions. |
|
257 |
ENSURE_PAGED_IN_64(12,2,3,6,add); |
|
258 |
// Data paged in so perform the operation. |
|
259 |
asm("adds r4, r2, r4 "); |
|
260 |
asm("adcs r5, r3, r5 "); // r5:r4 = oldv + v |
|
261 |
asm("strt r4, [r12], #4 "); // write back to *a |
|
262 |
asm("strt r5, [r12], #-4 "); |
|
263 |
CPSIEIF; |
|
264 |
asm("strt r2, [r0], #4 "); // return oldv |
|
265 |
asm("strt r3, [r0], #-4 "); |
|
266 |
asm("ldmfd sp!, {r4-r6} "); |
|
267 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
268 |
__JUMP(,lr); |
|
269 |
} |
|
270 |
||
271 |
__NAKED__ void ExecHandler::SlowAtomicTau64(SAtomicOpInfo64*) |
|
272 |
{ |
|
273 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
274 |
asm("stmfd sp!, {r4-r10} "); |
|
275 |
asm("ldrt r12, [r0], #8 "); // r12 = a |
|
276 |
asm("ldrt r4, [r0], #4 "); // r5:r4 = t |
|
277 |
asm("ldrt r5, [r0], #4 "); |
|
278 |
asm("ldrt r6, [r0], #4 "); // r7:r6 = u |
|
279 |
asm("ldrt r7, [r0], #4 "); |
|
280 |
asm("ldrt r8, [r0], #4 "); // r9:r8 = v |
|
281 |
asm("ldrt r9, [r0], #-28 "); |
|
282 |
||
283 |
// Disable interrupts and ensure the 64-bit data is paged in with write permissions. |
|
284 |
ENSURE_PAGED_IN_64(12,2,3,10,tau); |
|
285 |
// Data paged in so perform the operation. |
|
286 |
asm("cmp r2, r4 "); |
|
287 |
asm("sbcs r1, r3, r5 "); // oldv - t |
|
288 |
asm("movcc r6, r8 "); // if oldv<t r7:r6=v |
|
289 |
asm("movcc r7, r9 "); |
|
290 |
asm("adds r4, r2, r6 "); // r5:r4 = oldv + u or v |
|
291 |
asm("adcs r5, r3, r7 "); |
|
292 |
asm("strt r4, [r12], #4 "); // write back to *a |
|
293 |
asm("strt r5, [r12], #-4 "); |
|
294 |
CPSIEIF; |
|
295 |
asm("strt r2, [r0], #4 "); // return oldv |
|
296 |
asm("strt r3, [r0], #-4 "); |
|
297 |
asm("ldmfd sp!, {r4-r10} "); |
|
298 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
299 |
__JUMP(,lr); |
|
300 |
} |
|
301 |
||
302 |
__NAKED__ void ExecHandler::SlowAtomicTas64(SAtomicOpInfo64*) |
|
303 |
{ |
|
304 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
305 |
asm("stmfd sp!, {r4-r10} "); |
|
306 |
asm("ldrt r12, [r0], #8 "); // r12 = a |
|
307 |
asm("ldrt r4, [r0], #4 "); // r5:r4 = t |
|
308 |
asm("ldrt r5, [r0], #4 "); |
|
309 |
asm("ldrt r6, [r0], #4 "); // r7:r6 = u |
|
310 |
asm("ldrt r7, [r0], #4 "); |
|
311 |
asm("ldrt r8, [r0], #4 "); // r9:r8 = v |
|
312 |
asm("ldrt r9, [r0], #-28 "); |
|
313 |
||
314 |
// Disable interrupts and ensure the 64-bit data is paged in with write permissions. |
|
315 |
ENSURE_PAGED_IN_64(12,2,3,10,tas); |
|
316 |
// Data paged in so perform the operation. |
|
317 |
asm("cmp r2, r4 "); |
|
318 |
asm("sbcs r1, r3, r5 "); // oldv - t |
|
319 |
asm("movlt r6, r8 "); // if oldv<t r7:r6=v |
|
320 |
asm("movlt r7, r9 "); |
|
321 |
asm("adds r4, r2, r6 "); // r5:r4 = oldv + u or v |
|
322 |
asm("adcs r5, r3, r7 "); |
|
323 |
asm("strt r4, [r12], #4 "); // write back to *a |
|
324 |
asm("strt r5, [r12], #-4 "); |
|
325 |
CPSIEIF; |
|
326 |
asm("strt r2, [r0], #4 "); // return oldv |
|
327 |
asm("strt r3, [r0], #-4 "); |
|
328 |
asm("ldmfd sp!, {r4-r10} "); |
|
329 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
330 |
__JUMP(,lr); |
|
331 |
} |
|
332 |
#endif |
|
333 |
||
334 |
#ifdef __ATOMIC_USE_FAST_EXEC__ |
|
335 |
__NAKED__ TUint32 ExecHandler::FastAtomicAxo32(SAtomicOpInfo32*) |
|
336 |
{ |
|
337 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
338 |
asm("ldrt r12, [r0], #4 "); // r12 = a |
|
339 |
asm("ldrt r2, [r0], #4 "); // r2 = u |
|
340 |
asm("ldrt r3, [r0], #4 "); // r3 = v |
|
341 |
asm("ldrt r0, [r12] "); // r0 = *a = oldv |
|
342 |
asm("and r1, r0, r2 "); |
|
343 |
asm("eor r1, r1, r3 "); |
|
344 |
asm("strt r1, [r12] "); // *a = (oldv & u) ^ v |
|
345 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
346 |
__JUMP(,lr); // return oldv |
|
347 |
} |
|
348 |
||
349 |
__NAKED__ TBool ExecHandler::FastAtomicCas32(SAtomicOpInfo32*) |
|
350 |
{ |
|
351 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
352 |
asm("ldrt r12, [r0], #4 "); // r12 = a |
|
353 |
asm("ldrt r1, [r0], #4 "); // r1 = q |
|
354 |
asm("ldrt r2, [r12] "); // r2 = *a = oldv |
|
355 |
asm("ldrt r3, [r1] "); // r3 = *q |
|
356 |
asm("cmp r2, r3 "); |
|
357 |
asm("ldreqt r2, [r0], #4 "); // if (oldv==*q) *a=v |
|
358 |
asm("strnet r2, [r1] "); // if (oldv!=*q) *q=oldv |
|
359 |
asm("streqt r2, [r12] "); |
|
360 |
asm("movne r0, #0 "); |
|
361 |
asm("moveq r0, #1 "); |
|
362 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
363 |
__JUMP(,lr); // return oldv==*q |
|
364 |
} |
|
365 |
||
366 |
__NAKED__ TUint32 ExecHandler::FastAtomicAdd32(SAtomicOpInfo32*) |
|
367 |
{ |
|
368 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
369 |
asm("ldrt r12, [r0], #4 "); // r12 = a |
|
370 |
asm("ldrt r2, [r0], #4 "); // r2 = v |
|
371 |
asm("ldrt r0, [r12] "); // r0 = *a = oldv |
|
372 |
asm("add r1, r0, r2 "); |
|
373 |
asm("strt r1, [r12] "); // *a = oldv + v |
|
374 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
375 |
__JUMP(,lr); // return oldv |
|
376 |
} |
|
377 |
||
378 |
__NAKED__ TUint32 ExecHandler::FastAtomicTau32(SAtomicOpInfo32*) |
|
379 |
{ |
|
380 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
381 |
asm("mov r3, r0 "); |
|
382 |
asm("ldrt r12, [r3], #4 "); // r12 = a |
|
383 |
asm("ldrt r2, [r3], #4 "); // r2 = t |
|
384 |
asm("ldrt r0, [r12] "); // r0 = *a = oldv |
|
385 |
asm("cmp r0, r2 "); // oldv - t |
|
386 |
asm("addcc r3, r3, #4 "); // if oldv<t r3->v else r3->u |
|
387 |
asm("ldrt r1, [r3] "); // r1 = u or v |
|
388 |
asm("add r1, r0, r1 "); |
|
389 |
asm("strt r1, [r12] "); // *a = oldv + u or v |
|
390 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
391 |
__JUMP(,lr); // return oldv |
|
392 |
} |
|
393 |
||
394 |
__NAKED__ TInt32 ExecHandler::FastAtomicTas32(SAtomicOpInfo32*) |
|
395 |
{ |
|
396 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
397 |
asm("mov r3, r0 "); |
|
398 |
asm("ldrt r12, [r3], #4 "); // r12 = a |
|
399 |
asm("ldrt r2, [r3], #4 "); // r2 = t |
|
400 |
asm("ldrt r0, [r12] "); // r0 = *a = oldv |
|
401 |
asm("cmp r0, r2 "); // oldv - t |
|
402 |
asm("addlt r3, r3, #4 "); // if oldv<t r3->v else r3->u |
|
403 |
asm("ldrt r1, [r3] "); // r1 = u or v |
|
404 |
asm("add r1, r0, r1 "); |
|
405 |
asm("strt r1, [r12] "); // *a = oldv + u or v |
|
406 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
407 |
__JUMP(,lr); // return oldv |
|
408 |
} |
|
409 |
||
410 |
__NAKED__ TUint16 ExecHandler::FastAtomicAxo16(SAtomicOpInfo32*) |
|
411 |
{ |
|
412 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
413 |
asm("ldrt r12, [r0], #4 "); // r12 = a |
|
414 |
asm("ldrt r2, [r0], #4 "); // r2 = u |
|
415 |
asm("ldrt r3, [r0], #4 "); // r3 = v |
|
416 |
asm("ldrbt r0, [r12], #1 "); // r0 = *a = oldv |
|
417 |
asm("ldrbt r1, [r12], #-1 "); |
|
418 |
asm("orr r0, r0, r1, lsl #8 "); |
|
419 |
asm("and r1, r0, r2 "); |
|
420 |
asm("eor r1, r1, r3 "); |
|
421 |
asm("strbt r1, [r12], #1 "); // *a = (oldv & u) ^ v |
|
422 |
asm("mov r1, r1, lsr #8 "); |
|
423 |
asm("strbt r1, [r12] "); |
|
424 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
425 |
__JUMP(,lr); // return oldv |
|
426 |
} |
|
427 |
||
428 |
__NAKED__ TBool ExecHandler::FastAtomicCas16(SAtomicOpInfo32*) |
|
429 |
{ |
|
430 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
431 |
asm("ldrt r12, [r0], #4 "); // r12 = a |
|
432 |
asm("ldrt r1, [r0], #4 "); // r1 = q |
|
433 |
asm("stmfd sp!, {r4-r5} "); |
|
434 |
asm("ldrbt r2, [r12], #1 "); // r3:r2 = *a = oldv |
|
435 |
asm("ldrbt r3, [r12], #-1 "); // |
|
436 |
asm("ldrbt r4, [r1], #1 "); // r5:r4 = *q |
|
437 |
asm("ldrbt r5, [r1], #-1 "); |
|
438 |
asm("cmp r2, r4 "); |
|
439 |
asm("cmpeq r3, r5 "); |
|
440 |
asm("ldreqbt r2, [r0], #1 "); // if (oldv==*q) *a=v |
|
441 |
asm("ldreqbt r3, [r0], #-1 "); // if (oldv==*q) *a=v |
|
442 |
asm("strnebt r2, [r1], #1 "); // if (oldv!=*q) *q=oldv |
|
443 |
asm("strnebt r3, [r1], #-1 "); |
|
444 |
asm("streqbt r2, [r12], #1 "); |
|
445 |
asm("streqbt r3, [r12], #-1 "); |
|
446 |
asm("movne r0, #0 "); |
|
447 |
asm("moveq r0, #1 "); |
|
448 |
asm("ldmfd sp!, {r4-r5} "); |
|
449 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
450 |
__JUMP(,lr); // return oldv==*q |
|
451 |
} |
|
452 |
||
453 |
__NAKED__ TUint16 ExecHandler::FastAtomicAdd16(SAtomicOpInfo32*) |
|
454 |
{ |
|
455 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
456 |
asm("ldrt r12, [r0], #4 "); // r12 = a |
|
457 |
asm("ldrt r2, [r0], #4 "); // r2 = v |
|
458 |
asm("ldrbt r0, [r12], #1 "); // r0 = *a = oldv |
|
459 |
asm("ldrbt r1, [r12], #-1 "); |
|
460 |
asm("orr r0, r0, r1, lsl #8 "); |
|
461 |
asm("add r1, r0, r2 "); |
|
462 |
asm("strbt r1, [r12], #1 "); // *a = oldv + v |
|
463 |
asm("mov r1, r1, lsr #8 "); |
|
464 |
asm("strbt r1, [r12], #-1 "); |
|
465 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
466 |
__JUMP(,lr); // return oldv |
|
467 |
} |
|
468 |
||
469 |
__NAKED__ TUint16 ExecHandler::FastAtomicTau16(SAtomicOpInfo32*) |
|
470 |
{ |
|
471 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
472 |
asm("mov r3, r0 "); |
|
473 |
asm("ldrt r12, [r3], #4 "); // r12 = a |
|
474 |
asm("ldrt r2, [r3], #4 "); // r2 = t |
|
475 |
asm("ldrbt r0, [r12], #1 "); // r0 = *a = oldv |
|
476 |
asm("ldrbt r1, [r12], #-1 "); |
|
477 |
asm("orr r0, r0, r1, lsl #8 "); |
|
478 |
asm("cmp r0, r2 "); // oldv - t |
|
479 |
asm("addcc r3, r3, #4 "); // if oldv<t r3->v else r3->u |
|
480 |
asm("ldrt r1, [r3] "); // r1 = u or v |
|
481 |
asm("add r1, r0, r1 "); |
|
482 |
asm("strbt r1, [r12], #1 "); // *a = oldv + u or v |
|
483 |
asm("mov r1, r1, lsr #8 "); |
|
484 |
asm("strbt r1, [r12], #-1 "); |
|
485 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
486 |
__JUMP(,lr); // return oldv |
|
487 |
} |
|
488 |
||
489 |
__NAKED__ TInt16 ExecHandler::FastAtomicTas16(SAtomicOpInfo32*) |
|
490 |
{ |
|
491 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
492 |
asm("mov r3, r0 "); |
|
493 |
asm("ldrt r12, [r3], #4 "); // r12 = a |
|
494 |
asm("ldrt r2, [r3], #4 "); // r2 = t |
|
495 |
asm("ldrbt r0, [r12], #1 "); // r0 = *a = oldv |
|
496 |
asm("ldrbt r1, [r12], #-1 "); |
|
497 |
asm("orr r0, r0, r1, lsl #8 "); |
|
498 |
asm("mov r0, r0, lsl #16 "); |
|
499 |
asm("cmp r0, r2, lsl #16 "); // oldv - t |
|
500 |
asm("addlt r3, r3, #4 "); // if oldv<t r3->v else r3->u |
|
501 |
asm("ldrt r1, [r3] "); // r1 = u or v |
|
502 |
asm("add r1, r1, r0, asr #16 "); |
|
503 |
asm("strbt r1, [r12], #1 "); // *a = oldv + u or v |
|
504 |
asm("mov r1, r1, lsr #8 "); |
|
505 |
asm("strbt r1, [r12], #-1 "); |
|
506 |
asm("mov r0, r0, asr #16 "); |
|
507 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
508 |
__JUMP(,lr); // return oldv |
|
509 |
} |
|
510 |
||
511 |
__NAKED__ TUint8 ExecHandler::FastAtomicAxo8(SAtomicOpInfo32*) |
|
512 |
{ |
|
513 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
514 |
asm("ldrt r12, [r0], #4 "); // r12 = a |
|
515 |
asm("ldrt r2, [r0], #4 "); // r2 = u |
|
516 |
asm("ldrt r3, [r0], #4 "); // r3 = v |
|
517 |
asm("ldrbt r0, [r12] "); // r0 = *a = oldv |
|
518 |
asm("and r1, r0, r2 "); |
|
519 |
asm("eor r1, r1, r3 "); |
|
520 |
asm("strbt r1, [r12] "); // *a = (oldv & u) ^ v |
|
521 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
522 |
__JUMP(,lr); // return oldv |
|
523 |
} |
|
524 |
||
525 |
__NAKED__ TBool ExecHandler::FastAtomicCas8(SAtomicOpInfo32*) |
|
526 |
{ |
|
527 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
528 |
asm("ldrt r12, [r0], #4 "); // r12 = a |
|
529 |
asm("ldrt r1, [r0], #4 "); // r1 = q |
|
530 |
asm("ldrbt r2, [r12] "); // r2 = *a = oldv |
|
531 |
asm("ldrbt r3, [r1] "); // r3 = *q |
|
532 |
asm("cmp r2, r3 "); |
|
533 |
asm("ldreqt r2, [r0], #4 "); // if (oldv==*q) *a=v |
|
534 |
asm("strnebt r2, [r1] "); // if (oldv!=*q) *q=oldv |
|
535 |
asm("streqbt r2, [r12] "); |
|
536 |
asm("movne r0, #0 "); |
|
537 |
asm("moveq r0, #1 "); |
|
538 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
539 |
__JUMP(,lr); // return oldv==*q |
|
540 |
} |
|
541 |
||
542 |
__NAKED__ TUint8 ExecHandler::FastAtomicAdd8(SAtomicOpInfo32*) |
|
543 |
{ |
|
544 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
545 |
asm("ldrt r12, [r0], #4 "); // r12 = a |
|
546 |
asm("ldrt r2, [r0], #4 "); // r2 = v |
|
547 |
asm("ldrbt r0, [r12] "); // r0 = *a = oldv |
|
548 |
asm("add r1, r0, r2 "); |
|
549 |
asm("strbt r1, [r12] "); // *a = oldv + v |
|
550 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
551 |
__JUMP(,lr); // return oldv |
|
552 |
} |
|
553 |
||
554 |
__NAKED__ TUint8 ExecHandler::FastAtomicTau8(SAtomicOpInfo32*) |
|
555 |
{ |
|
556 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
557 |
asm("mov r3, r0 "); |
|
558 |
asm("ldrt r12, [r3], #4 "); // r12 = a |
|
559 |
asm("ldrt r2, [r3], #4 "); // r2 = t |
|
560 |
asm("ldrbt r0, [r12] "); // r0 = *a = oldv |
|
561 |
asm("cmp r0, r2 "); // oldv - t |
|
562 |
asm("addcc r3, r3, #4 "); // if oldv<t r3->v else r3->u |
|
563 |
asm("ldrt r1, [r3] "); // r1 = u or v |
|
564 |
asm("add r1, r0, r1 "); |
|
565 |
asm("strbt r1, [r12] "); // *a = oldv + u or v |
|
566 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
567 |
__JUMP(,lr); // return oldv |
|
568 |
} |
|
569 |
||
570 |
__NAKED__ TInt8 ExecHandler::FastAtomicTas8(SAtomicOpInfo32*) |
|
571 |
{ |
|
572 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
|
573 |
asm("mov r3, r0 "); |
|
574 |
asm("ldrt r12, [r3], #4 "); // r12 = a |
|
575 |
asm("ldrt r2, [r3], #4 "); // r2 = t |
|
576 |
asm("ldrbt r0, [r12] "); // r0 = *a = oldv |
|
577 |
asm("mov r0, r0, lsl #24 "); |
|
578 |
asm("cmp r0, r2, lsl #24 "); // oldv - t |
|
579 |
asm("addlt r3, r3, #4 "); // if oldv<t r3->v else r3->u |
|
580 |
asm("ldrt r1, [r3] "); // r1 = u or v |
|
581 |
asm("add r1, r1, r0, asr #24 "); |
|
582 |
asm("strbt r1, [r12] "); // *a = oldv + u or v |
|
583 |
asm("mov r0, r0, asr #24 "); |
|
584 |
USER_MEMORY_GUARD_ON(,r12,r12); |
|
585 |
__JUMP(,lr); // return oldv |
|
586 |
} |
|
587 |
#endif |
|
588 |
||
589 |
#ifdef __FASTEXEC_MACHINE_CODED__ |
|
590 |
__NAKED__ RAllocator* ExecHandler::Heap() |
|
591 |
{ |
|
592 |
asm("ldr r0, [r1, #%a0]" : : "i" (_FOFF(DThread,iAllocator)-_FOFF(DThread,iNThread))); |
|
593 |
__JUMP(,lr); |
|
594 |
} |
|
595 |
||
31
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
596 |
__NAKED__ TTrapHandler* ExecHandler::PushTrapFrame(TTrap* /*aFrame*/ /* r1=TheCurrentThread */) |
0 | 597 |
// |
598 |
// Push a new trap frame. |
|
599 |
// For user code only, kernel code should not use TRAP/Leave. |
|
600 |
// |
|
601 |
{ |
|
602 |
#ifdef __LEAVE_EQUALS_THROW__ |
|
603 |
asm("b " CSM_Z15InvalidFastExecv); |
|
604 |
#else |
|
31
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
605 |
asm("ldr r2, [r1, #%a0]" : : "i" (_FOFF(DThread,iFrame)-_FOFF(DThread,iNThread))); // r2=TheCurrentThread->iFrame |
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
606 |
asm("ldr r3, [r1, #%a0]" : : "i" (_FOFF(DThread,iTrapHandler)-_FOFF(DThread,iNThread)));// r3=TheCurrentThread->iTrapHandler |
0 | 607 |
USER_MEMORY_GUARD_OFF(,r12,r12); |
31
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
608 |
asm("add r12, r0, #%a0" : : "i" _FOFF(TTrap,iNext)); // r12->aFrame.iNext (in user space) |
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
609 |
asm("strt r2, [r12] "); // aFrame.iNext=TheCurrentThread->iFrame |
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
610 |
asm("add r12, r0, #%a0" : : "i" _FOFF(TTrap,iHandler)); // r12->aFrame.iHandler (in user space) |
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
611 |
asm("strt r3, [r12] "); // aFrame.iHandler=TheCurrentThread->iTrapHandler |
0 | 612 |
USER_MEMORY_GUARD_ON(,r12,r12); |
31
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
613 |
asm("str r0, [r1, #%a0]" : : "i" (_FOFF(DThread,iFrame)-_FOFF(DThread,iNThread))); // TheCurrentThread->iFrame=aFrame |
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
614 |
asm("mov r0, r3 "); // return TheCurrentThread->iTrapHandler |
0 | 615 |
__JUMP(,lr); |
616 |
#endif |
|
617 |
} |
|
618 |
||
619 |
__NAKED__ TTrap* ExecHandler::PopTrapFrame() |
|
620 |
// |
|
621 |
// Pop the current frame. |
|
622 |
// For user code only, kernel code should not use TRAP/Leave. |
|
623 |
// |
|
624 |
{ |
|
625 |
#ifdef __LEAVE_EQUALS_THROW__ |
|
626 |
asm("b " CSM_Z15InvalidFastExecv); |
|
627 |
#else |
|
628 |
asm("ldr r0, [r1, #%a0]" : : "i" (_FOFF(DThread,iFrame)-_FOFF(DThread,iNThread))); // r0=TheCurrentThread->iFrame |
|
31
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
629 |
asm("cmp r0, #0 "); // ignore rest of code if NULL |
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
630 |
USER_MEMORY_GUARD_OFF(ne,r12,r12); |
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
631 |
asm("addne r12, r0, #%a0" : : "i" _FOFF(TTrap,iNext)); // r12->iFrame.iNext (in user space) |
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
632 |
asm("ldrnet r2, [r12] "); // r2=iFrame->iNext |
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
633 |
USER_MEMORY_GUARD_ON(ne,r12,r12); |
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
634 |
asm("strne r2, [r1, #%a0]" : : "i" (_FOFF(DThread,iFrame)-_FOFF(DThread,iNThread))); // iFrame=iFrame->iNext |
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
635 |
__JUMP(,lr); // returning old iFrame |
0 | 636 |
#endif |
637 |
} |
|
638 |
||
639 |
__NAKED__ CActiveScheduler* ExecHandler::ActiveScheduler() |
|
640 |
// |
|
641 |
// Return the address of the current active scheduler |
|
642 |
// |
|
643 |
{ |
|
644 |
asm("ldr r0, [r1, #%a0]" : : "i" (_FOFF(DThread,iScheduler)-_FOFF(DThread,iNThread))); |
|
645 |
__JUMP(,lr); |
|
646 |
} |
|
647 |
||
648 |
__NAKED__ void ExecHandler::SetActiveScheduler(CActiveScheduler* /*aScheduler*/) |
|
649 |
// |
|
650 |
// Set the address of the current active scheduler |
|
651 |
// |
|
652 |
{ |
|
653 |
asm("str r0, [r1, #%a0]" : : "i" (_FOFF(DThread,iScheduler)-_FOFF(DThread,iNThread))); |
|
654 |
__JUMP(,lr); |
|
655 |
} |
|
656 |
||
657 |
__NAKED__ TTrapHandler* ExecHandler::TrapHandler() |
|
658 |
// |
|
659 |
// Return the current trap handler. |
|
660 |
// |
|
661 |
{ |
|
662 |
asm("ldr r0, [r1, #%a0]" : : "i" (_FOFF(DThread,iTrapHandler)-_FOFF(DThread,iNThread))); |
|
663 |
__JUMP(,lr); |
|
664 |
} |
|
665 |
||
666 |
__NAKED__ TTrapHandler* ExecHandler::SetTrapHandler(TTrapHandler* /*aHandler*/) |
|
667 |
// |
|
668 |
// Set the current trap handler. |
|
669 |
// |
|
670 |
{ |
|
671 |
asm("ldr r2, [r1, #%a0]" : : "i" (_FOFF(DThread,iTrapHandler)-_FOFF(DThread,iNThread))); |
|
672 |
asm("str r0, [r1, #%a0]" : : "i" (_FOFF(DThread,iTrapHandler)-_FOFF(DThread,iNThread))); |
|
673 |
asm("mov r0, r2 "); |
|
674 |
__JUMP(,lr); |
|
675 |
} |
|
676 |
||
677 |
__NAKED__ void ExecHandler::SetReentryPoint(TLinAddr) |
|
678 |
{ |
|
679 |
asm("ldr r2, [r1, #%a0]" : : "i" (_FOFF(DThread,iOwningProcess)-_FOFF(DThread,iNThread))); |
|
680 |
asm("str r0, [r2, #%a0]" : : "i" _FOFF(DProcess,iReentryPoint)); |
|
681 |
__JUMP(,lr); |
|
682 |
} |
|
683 |
#endif |
|
684 |
||
685 |
/*********************************************************************************** |
|
686 |
* Exec dispatch code |
|
687 |
***********************************************************************************/ |
|
688 |
||
689 |
__NAKED__ void InvalidFastExec() |
|
690 |
{ |
|
691 |
asm("mov r0, #0x13 "); |
|
692 |
asm("msr cpsr, r0 "); |
|
693 |
asm("b " CSM_Z18InvalidExecHandlerv); |
|
694 |
} |
|
695 |
||
696 |
||
697 |
/*************************************************************************** |
|
698 |
* Look up a handle in r0 in the current thread or process handles array |
|
699 |
* On entry r5=0xY000000X, where bits 0-5 indicate the type of object referenced. |
|
700 |
* Also r10 points to K::TheScheduler, r9 to the current NThread |
|
701 |
* Can use registers r0,r4,r7,r8,ip. Preserve r1,r2,r3,r5,r6,r9-r11 |
|
702 |
* Return r0=address of object referenced, or NULL if handle invalid |
|
703 |
* Enter and leave with system locked |
|
704 |
***************************************************************************/ |
|
705 |
// Look up the handle in r0 in the current thread or process handle array.
// On entry r5=0xY000000X, bits 0-5 indicate the type of object referenced;
// r9 points to the current NThread. Returns r0 = object address, or panics
// the current thread on a bad handle. Enter and leave with the system locked.
__NAKED__ void PreprocessHandler()
	{
	asm("tst r5, #0x20 ");					// bit 5 set => message handle lookup
	asm("bne lookup_message ");
	asm("ands r7, r5, #0x1f ");				// r7 = object container number+1 = DObjectCon uniqueID
	asm("mvneq r7, #0 ");					// r7=-1 if any type ok
	asm("adds ip, r0, r0 ");				// check for special handle (bit 31 set => special)
	asm("bcs lookup_special ");				// if not special, N flag indicates local handle
	asm("ldrpl ip, [r9, #%a0]" : : "i" (_FOFF(DThread,iOwningProcess)-_FOFF(DThread,iNThread)));	// if not local, ip=aThread->iOwningProcess
	asm("addmi ip, r9, #%a0" : : "i" (_FOFF(DThread,iHandles)-_FOFF(DThread,iNThread)));			// if local, ip=&aThread->iHandles
	asm("addpl ip, ip, #%a0" : : "i" _FOFF(DProcess,iHandles));		// if not, ip=&aThread->iOwningProcess->iHandles

#ifdef __HANDLES_USE_RW_SPIN_LOCK__
	asm("stmfd sp!, {r0,ip,lr} ");			// save r0, ip and lr
	asm("mov r0, ip ");						// move RObjectIx this(ip) to r0
	// FIX: statement was missing its terminating ';' (and used inconsistent
	// 'BL') - a compile error whenever __HANDLES_USE_RW_SPIN_LOCK__ is defined.
	asm("bl _ZN9RObjectIx15AcquireReadLockEv");	// call RObjectIx::AcquireReadLock
	asm("ldmfd sp!, {r0,ip,lr} ");			// restore r0, ip and lr
#else
	// assumes system lock is held
#endif

	asm("mov r4, r0, lsl #17 ");			// r4=r0<<17 = index(aHandle)<<17
	asm("mov r0, r0, lsl #2 ");				// r0=instance(Handle)<<18
	asm("ldr r8, [ip, #%a0]" : : "i" _FOFF(RObjectIx,iCount));		// r8=iCount
	asm("ldr ip, [ip, #%a0]" : : "i" _FOFF(RObjectIx,iSlots));		// ip=iSlots
	asm("mov r0, r0, lsr #18 ");			// r0=instance(Handle)
	asm("cmp r8, r4, lsr #17 ");			// compare iCount with index(aHandle)
	asm("ldrgt r8, [ip, r4, lsr #14]! ");	// if count>index, r8=pS->uniqueID(bits14-19):pS->instance(bits0-13), ip=iObjects+index(Handle)=pS
	asm("ble lookup_handle_bad ");			// if count<=index, bad handle
	asm("cmn r7, #1 ");						// check if any type of object is ok
	asm("orreq r8, r8, r7, lsl #14 ");		// if it is, (therefore r7=-1) set top 18 bits of r8
	asm("orr r0, r0, r7, lsl #14 ");		// r0=aUniqueID(bits14-19):instance(Handle)(bits0-13)
	asm("mov r0, r0, lsl #12 ");			// only interested in comparing lower 20 bits...
	asm("mov r8, r8, lsl #12 ");
	asm("cmp r0, r8 ");						// check instance, and unique ID if necessary
	asm("ldreq r0, [ip, #4] ");				// if OK return pointer to CObject
	asm("movne r0, #0");					// else r0 = 0
	asm("andeq r0, r0, #%a0" : : "i" (RObjectIx::EObjRObjMask));	// r0 = (pointer & EObjRObjMask);

#ifdef __HANDLES_USE_RW_SPIN_LOCK__
	asm("stmfd sp!, {r0,ip,lr} ");			// save r0, ip and lr
	asm("mov r0, ip ");						// move RObjectIx this(ip) to r0
	// FIX: statement was missing its terminating ';' - same compile error as above.
	asm("bl _ZN9RObjectIx15ReleaseReadLockEv");	// call RObjectIx::ReleaseReadLock
	asm("ldmfd sp!, {r0,ip,lr} ");			// restore r0, ip and lr
#else
	// system lock is held, nothing to do
#endif

	asm("cmp r0, #0 ");
	__JUMP(ne,lr);
	asm("b lookup_handle_bad ");			// if NULL, bad handle

	asm("lookup_special: ");				// r12=handle<<1 (valid values are fffx0000 and fffx0002, where x=e or f)
	asm("bic ip, ip, #0x10000 ");			// clear 'no close' flag
#ifdef __OBSOLETE_V1_IPC_SUPPORT__
	asm("cmp ip, #0x10000000 ");
	asm("blo lookup_thread_pseudo ");		// if handle<0x88000000, it's an IPC client thread pseudo handle
#endif
	asm("add ip, ip, #0x20000 ");			// valid values now 0 and 2
	asm("cmp r7, #2 ");						// r7=container number+1 or -1 if any object OK (can only be -1,1-31)
	asm("bgt lookup_handle_bad ");
	asm("beq 1f ");
	asm("cmp ip, #2 ");
	asm("subeq r0, r9, #%a0" : : "i" _FOFF(DThread,iNThread));	// handle==current thread pseudo-handle => r0=current DThread
	__JUMP(eq,lr);
	asm("cmn r7, #1 ");						// r7=-1 means any type of object will do
	asm("bne lookup_handle_bad ");
	asm("1: ");
	asm("cmp ip, #0 ");
	asm("ldreq r0, [r9, #%a0]" : : "i" (_FOFF(DThread,iOwningProcess)-_FOFF(DThread,iNThread)));	// current process pseudo-handle
	__JUMP(eq,lr);
	asm("b lookup_handle_bad ");

#ifdef __OBSOLETE_V1_IPC_SUPPORT__
	asm("lookup_thread_pseudo: ");
	asm("ldr r4, __KernMsgInfo ");			// r4->msg chunk info
	asm("cmp r7, #1 ");
	asm("bgt lookup_handle_bad ");			// object type must be thread or unspecified
	asm("mov r0, ip, lsl #16 ");			// demangle handle; r0 = low half of handle
	asm("add r7, ip, r0, lsr #16 ");		// demangle handle; r7 = purported offset of RMessageK in msg chunk
	asm("ldr r0, [r4, #%a0]" : : "i" _FOFF(K::SMsgInfo,iBase));		// r0 = base address of kernel msg chunk
	asm("add r12, r7, #%a0" : : "i" ((TInt)sizeof(RMessageK)));		// r12 = offset + sizeof(RMessageK)
	asm("ldr r4, [r4, #%a0]" : : "i" _FOFF(K::SMsgInfo,iMaxSize));	// r4 = max size of kernel msg chunk
	asm("add r0, r0, r7 ");					// demangle handle; r0 = pointer to RMessageK (if good handle)
	asm("cmp r4, r12 ");					// is this off the end of the msg chunk?
	asm("blo lookup_handle_bad ");			// if so, bad handle
	asm("b lookup_message_client ");		// if not, get client
#endif

	asm("lookup_message: ");				// r0 points to RMessageK, validate it
	asm("ldr r4, __KernMsgInfo ");			// r4->msg chunk info
	asm("ldr r7, [r4, #%a0]" : : "i" _FOFF(K::SMsgInfo,iBase));		// r7 = base address of kernel msg chunk
	asm("ldr r4, [r4, #%a0]" : : "i" _FOFF(K::SMsgInfo,iMaxSize));	// r4 = max size of kernel msg chunk
	asm("subs r7, r0, r7 ");				// r7 = aMsg - base of kernel msg chunk
	asm("addhs r12, r7, #%a0" : : "i" ((TInt)sizeof(RMessageK)));	// if >=, r12 = offset+sizeof(RMessageK)
	asm("cmphs r4, r12 ");					// compare max size to this value
	asm("blo bad_message_handle ");			// if offset < 0 or max size < offset+sizeof(RMessageK), panic

#ifdef __OBSOLETE_V1_IPC_SUPPORT__
	asm("lookup_message_client: ");
#endif
	// r0 is pointer to message, r7 is offset within chunk
	asm("tst r7, #%a0" : : "i" (RMessageK::KMessageSize-1));		// check alignment
	asm("ldr r8, [r9, #%a0]" : : "i" (_FOFF(DThread,iOwningProcess)-_FOFF(DThread,iNThread)));	// r8->current process
	asm("bne bad_message_handle ");			// reject if misaligned
	asm("adds ip, r0, #%a0" : : "i" _FOFF(RMessageK, iServerLink));
	// NOTE: Z flag clear here after 'adds' above
	asm(".global __magic_address_msg_lookup_1 ");
	asm("__magic_address_msg_lookup_1: ");	// this instruction is magically immune from exceptions
	// Warning: HARDCODED OFFSET(RMessageK) - assumes next 3 words are from class RMessageKBase
	asm("ldmia ip, {r4,r7,ip} ");			// get message iNext, iPrev, iFunction, set Z if bad
	// should have r4=~r0, r7=~r8 if OK
	asm("beq bad_message_handle ");			// if exception, panic
	asm("eor r4, r4, r0 ");					// should be 0xffffffff
	asm("eor r7, r7, r8 ");					// should be 0xffffffff
	asm("and r4, r4, r7 ");					// should be 0xffffffff
	asm("cmn r4, #1 ");
	asm("bne bad_message_handle ");			// if no good, panic
	asm("and r7, r5, #0x3f ");
	asm("cmp r7, #%a0" : : "i" ((TInt)EIpcMessageD));	// EIpcMessageD requested?
	__JUMP(eq, lr);							// if so, finished
	asm("cmp ip, #%a0" : : "i" ((TInt)RMessage2::EDisConnect));	// else check iFunction != RMessage2::EDisConnect
	asm("beq bad_message_handle ");			// if it is, no good
	asm("cmp r7, #%a0" : : "i" ((TInt)EIpcMessage));	// EIpcMessage requested?
	asm("ldrne r0, [r0, #%a0]" : : "i" _FOFF(RMessageK, iClient));	// if not, r0=message->iClient
	__JUMP(,lr);

	asm("lookup_handle_bad: ");
	asm("mov r0, #%a0" : : "i" (EBadHandle));
	asm("b " CSM_ZN1K18PanicCurrentThreadEi);	// never returns

	asm("bad_message_handle: ");
	asm("mov r0, #%a0" : : "i" (EBadMessageHandle));
	asm("b " CSM_ZN1K18PanicCurrentThreadEi);	// never returns

	asm("__KernMsgInfo: ");					// literal pool word: address of K::MsgInfo
	asm(".word " CSM_ZN1K7MsgInfoE);
	}
843 |
||
844 |
||
845 |
/** Execute a HAL function |
|
846 |
||
847 |
Executes a HAL function in the group specified. |
|
848 |
Kern::HalFunction is the way to call the HAL functions from kernel code. |
|
849 |
||
850 |
@param aGroup The HAL group this function belongs to. Defined in THalFunctionGroup. |
|
851 |
@param aFunction The function within the specified group |
|
852 |
@param a1 HAL Function parameter |
|
853 |
@param a2 HAL Function parameter |
|
854 |
@param aDeviceNumber The device number (eg. screen number) |
|
855 |
@return KErrNone, if successful, KErrNotSupported if aGroup or aFunction is out of range, |
|
856 |
or if there is no function registered to handle the group or any of the other system errors. |
|
857 |
@see Kern::AddHalEntry |
|
858 |
@see THalFunctionGroup |
|
859 |
@see KMaxHalGroups |
|
860 |
||
861 |
@pre No fast mutex can be held. |
|
862 |
@pre Kernel must be unlocked. |
|
863 |
@pre Call in a thread context. |
|
864 |
@pre Interrupts must be enabled. |
|
865 |
*/ |
|
866 |
EXPORT_C __NAKED__ TInt Kern::HalFunction(TInt /*aGroup*/, TInt /*aFunction*/, TAny* /*a1*/, TAny* /*a2*/,TInt /*aDeviceNumber*/)
	{
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC|MASK_NO_FAST_MUTEX);

	asm("ldr ip, [sp, #0] ");				// ip = aDeviceNumber (5th argument, passed on the stack in AAPCS)
	asm("orr r0, r0, ip, lsl #16 ");		// pack the device number into the top 16 bits of aGroup
	// NOTE(review): no return/branch here. This __NAKED__ function appears to
	// rely on falling straight through into the 4-argument Kern::HalFunction
	// overload defined immediately after it in this file - confirm the two
	// definitions remain adjacent if this file is ever reorganised.
	}
|
873 |
||
874 |
/** Execute a HAL function |
|
875 |
||
876 |
Executes a HAL function in the group specified. |
|
877 |
Kern::HalFunction is the way to call the HAL functions from kernel code. |
|
878 |
||
879 |
@param aGroup The HAL group this function belongs to. Defined in THalFunctionGroup. |
|
880 |
@param aFunction The function within the specified group |
|
881 |
@param a1 HAL Function parameter |
|
882 |
@param a2 HAL Function parameter |
|
883 |
@return KErrNone, if successful, KErrNotSupported if aGroup or aFunction is out of range, |
|
884 |
or if there is no function registered to handle the group or any of the other system errors. |
|
885 |
@see Kern::AddHalEntry |
|
886 |
@see THalFunctionGroup |
|
887 |
@see KMaxHalGroups |
|
888 |
||
889 |
@pre No fast mutex can be held. |
|
890 |
@pre Kernel must be unlocked. |
|
891 |
@pre Call in a thread context. |
|
892 |
@pre Interrupts must be enabled. |
|
893 |
*/ |
|
894 |
EXPORT_C __NAKED__ TInt Kern::HalFunction(TInt /*aGroup*/, TInt /*aFunction*/, TAny* /*a1*/, TAny* /*a2*/)
// This must be done as a SWI to get the correct permissions when calling from supervisor mode.
	{
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC|MASK_NO_FAST_MUTEX);

	asm("mrs ip, spsr ");					// swi will trash lr and spsr when called in svc mode...
	asm("stmfd sp!, {ip, lr} ");			// ...so preserve both across the call
	asm("swi %a0" : : "i" (EExecHalFunction|EXECUTIVE_SLOW));	// dispatch via the slow exec path
	asm("ldmfd sp!, {ip, lr} ");
	asm("msr spsr, ip");					// restore caller's spsr
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
	asm("nop ");							// ARM Cortex-A9 MPCore erratum 571622 workaround
	asm("nop ");							// Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
	asm("nop ");
#endif
	__JUMP(, lr);

	// Assembler version of ExecHandler::ThreadRequestSignal
	// Enter with r0->DThread
	//            r9->current NThread
	// Can trash r0-r4, r6-r8, r12
	asm("_asm_exec_ThreadRequestSignal: ");
	asm("ldr r6, [r0, #%a0]" : : "i" _FOFF(DThread,iOwningProcess));	// r6->target process
	asm("ldr r12, [r9, #%a0]" : : "i" (_FOFF(DThread,iOwningProcess)-_FOFF(DThread,iNThread)));	// r12->current process
	asm("add r0, r0, #%a0" : : "i" _FOFF(DThread,iNThread));			// r0->target NThread
	asm("mov r1, #0 ");					// Mutex arg for NKern::ThreadRequestSignal
	asm("cmp r6, r12 ");				// target process = current process?
	asm("beq " CSM_ZN5NKern19ThreadRequestSignalEP7NThreadP10NFastMutex );	// if same process, NKern::ThreadRequestSignal
	asm("b " CSM_ZN1K27LockedPlatformSecurityPanicEv);	// otherwise platform security panic
	}
|
924 |
||
925 |