|
1 // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of the License "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: |
|
14 // e32\nkernsmp\arm\ncutilf.cia |
|
15 // |
|
16 // |
|
17 |
|
18 #include <e32cia.h> |
|
19 #include <arm.h> |
|
20 #include <arm_gic.h> |
|
21 #include <arm_tmr.h> |
|
22 |
|
23 |
|
24 |
|
25 __NAKED__ void Arm::GetUserSpAndLr(TAny*) |
|
26 { |
|
27 asm("stmia r0, {r13, r14}^ "); |
|
28 asm("mov r0, r0"); // NOP needed between stm^ and banked register access |
|
29 __JUMP(, lr); |
|
30 } |
|
31 |
|
32 __NAKED__ void Arm::SetUserSpAndLr(TAny*) |
|
33 { |
|
34 asm("ldmia r0, {r13, r14}^ "); |
|
35 asm("mov r0, r0"); // NOP needed between ldm^ and banked register access |
|
36 __JUMP(, lr); |
|
37 } |
|
38 |
|
39 __NAKED__ TUint32 Arm::Dacr() |
|
40 { |
|
41 asm("mrc p15, 0, r0, c3, c0, 0 "); |
|
42 __JUMP(, lr); |
|
43 } |
|
44 |
|
45 __NAKED__ void Arm::SetDacr(TUint32) |
|
46 { |
|
47 asm("mcr p15, 0, r0, c3, c0, 0 "); |
|
48 __INST_SYNC_BARRIER_Z__(r1); |
|
49 __JUMP(, lr); |
|
50 } |
|
51 |
|
52 __NAKED__ TUint32 Arm::ModifyDacr(TUint32, TUint32) |
|
53 { |
|
54 asm("mrc p15, 0, r2, c3, c0, 0 "); |
|
55 asm("bic r2, r2, r0 "); |
|
56 asm("orr r2, r2, r1 "); |
|
57 asm("mcr p15, 0, r2, c3, c0, 0 "); |
|
58 __INST_SYNC_BARRIER_Z__(r3); |
|
59 asm("mov r0, r2 "); |
|
60 __JUMP(, lr); |
|
61 } |
|
62 |
|
63 __NAKED__ void Arm::SetCar(TUint32) |
|
64 { |
|
65 SET_CAR(, r0); |
|
66 __JUMP(, lr); |
|
67 } |
|
68 |
|
69 |
|
70 |
|
71 /** Get the CPU's coprocessor access register value |
|
72 |
|
73 @return The value of the CAR, 0 if CPU doesn't have CAR |
|
74 |
|
75 @publishedPartner |
|
76 @released |
|
77 */ |
|
78 EXPORT_C __NAKED__ TUint32 Arm::Car() |
|
79 { |
|
80 GET_CAR(, r0); |
|
81 __JUMP(, lr); |
|
82 } |
|
83 |
|
84 |
|
85 |
|
86 /** Modify the CPU's coprocessor access register value |
|
87 Does nothing if CPU does not have CAR. |
|
88 |
|
89 @param aClearMask Mask of bits to clear (1 = clear this bit) |
|
90 @param aSetMask Mask of bits to set (1 = set this bit) |
|
91 @return The original value of the CAR, 0 if CPU doesn't have CAR |
|
92 |
|
93 @publishedPartner |
|
94 @released |
|
95 */ |
|
96 EXPORT_C __NAKED__ TUint32 Arm::ModifyCar(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/) |
|
97 { |
|
98 GET_CAR(, r2); |
|
99 asm("bic r0, r2, r0 "); |
|
100 asm("orr r0, r0, r1 "); |
|
101 SET_CAR(, r0); |
|
102 asm("mov r0, r2 "); |
|
103 __JUMP(, lr); |
|
104 } |
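
/* ModifyCar(), ModifyDacr(), ModifyFpExc() and ModifyFpScr() all share the same
   read-modify-write shape. A minimal C-style sketch of that shape (illustrative
   only, not part of the build; ReadReg/WriteReg are stand-ins for the MRC/MCR or
   FMRX/FMXR accesses used above):

	TUint32 ModifyReg(TUint32 aClearMask, TUint32 aSetMask)
		{
		TUint32 orig = ReadReg();						// e.g. GET_CAR(, r2)
		WriteReg((orig & ~aClearMask) | aSetMask);		// clear requested bits, then set
		return orig;									// caller gets the original value
		}
*/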
|
105 |
|
106 |
|
107 #ifdef __CPU_HAS_VFP |
|
108 __NAKED__ void Arm::SetFpExc(TUint32) |
|
109 { |
|
110 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED) |
|
111 // If we are about to enable VFP, disable dynamic branch prediction |
|
112 // If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled |
|
113 asm("mrs r3, cpsr "); |
|
114 __ASM_CLI(); |
|
115 asm("mrc p15, 0, r1, c1, c0, 1 "); |
|
116 asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); |
|
117 asm("bic r1, r1, #2 "); // clear DB bit (disable dynamic prediction) |
|
118 asm("and r2, r1, #1 "); // r2 bit 0 = RS bit (1 if return stack enabled) |
|
119 asm("orreq r1, r1, r2, lsl #1 "); // if VFP is being disabled set DB = RS |
|
120 asm("mcr p15, 0, r1, c1, c0, 1 "); |
|
121 asm("mcr p15, 0, r2, c7, c5, 6 "); // flush BTAC |
|
122 VFP_FMXR(, VFP_XREG_FPEXC,0); |
|
123 __INST_SYNC_BARRIER_Z__(r12); |
|
124 asm("msr cpsr, r3 "); |
|
125 __JUMP(, lr); |
|
126 #else |
|
127 VFP_FMXR(, VFP_XREG_FPEXC,0); |
|
128 __JUMP(, lr); |
|
129 #endif |
|
130 } |
|
131 #endif |
|
132 |
|
133 |
|
134 |
|
135 /** Get the value of the VFP FPEXC register |
|
136 |
|
137 @return The value of FPEXC, 0 if there is no VFP |
|
138 |
|
139 @publishedPartner |
|
140 @released |
|
141 */ |
|
142 EXPORT_C __NAKED__ TUint32 Arm::FpExc() |
|
143 { |
|
144 #ifdef __CPU_HAS_VFP |
|
145 VFP_FMRX(, 0,VFP_XREG_FPEXC); |
|
146 #else |
|
147 asm("mov r0, #0 "); |
|
148 #endif |
|
149 __JUMP(, lr); |
|
150 } |
|
151 |
|
152 |
|
153 |
|
154 /** Modify the VFP FPEXC register |
|
155 Does nothing if there is no VFP.
|
156 |
|
157 @param aClearMask Mask of bits to clear (1 = clear this bit) |
|
158 @param aSetMask Mask of bits to set (1 = set this bit) |
|
159 @return The original value of FPEXC, 0 if no VFP present |
|
160 |
|
161 @publishedPartner |
|
162 @released |
|
163 */ |
|
164 EXPORT_C __NAKED__ TUint32 Arm::ModifyFpExc(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/) |
|
165 { |
|
166 #ifdef __CPU_HAS_VFP |
|
167 VFP_FMRX(, 12,VFP_XREG_FPEXC); |
|
168 asm("bic r0, r12, r0 "); |
|
169 asm("orr r0, r0, r1 "); |
|
170 |
|
171 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED) |
|
172 // If we are about to enable VFP, disable dynamic branch prediction |
|
173 // If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled |
|
174 asm("mrs r3, cpsr "); |
|
175 __ASM_CLI(); |
|
176 asm("mrc p15, 0, r1, c1, c0, 1 "); |
|
177 asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); |
|
178 asm("bic r1, r1, #2 "); // clear DB bit (disable dynamic prediction) |
|
179 asm("and r2, r1, #1 "); // r2 bit 0 = RS bit (1 if return stack enabled) |
|
180 asm("orreq r1, r1, r2, lsl #1 "); // if VFP is being disabled set DB = RS |
|
181 asm("mcr p15, 0, r1, c1, c0, 1 "); |
|
182 asm("mcr p15, 0, r2, c7, c5, 6 "); // flush BTAC |
|
183 VFP_FMXR(, VFP_XREG_FPEXC,0); |
|
184 __INST_SYNC_BARRIER_Z__(r12); |
|
185 asm("msr cpsr, r3 "); |
|
186 #else |
|
187 VFP_FMXR(, VFP_XREG_FPEXC,0); |
|
188 #endif // erratum 351912 |
|
189 |
|
190 asm("mov r0, r12 "); |
|
191 #else // no vfp |
|
192 asm("mov r0, #0 "); |
|
193 #endif |
|
194 __JUMP(, lr); |
|
195 } |
|
196 |
|
197 /** Get the value of the VFP FPSCR register |
|
198 |
|
199 @return The value of FPSCR, 0 if there is no VFP |
|
200 |
|
201 @publishedPartner |
|
202 @released |
|
203 */ |
|
204 EXPORT_C __NAKED__ TUint32 Arm::FpScr() |
|
205 { |
|
206 #ifdef __CPU_HAS_VFP |
|
207 VFP_FMRX(, 0,VFP_XREG_FPSCR); |
|
208 #else |
|
209 asm("mov r0, #0 "); |
|
210 #endif |
|
211 __JUMP(, lr); |
|
212 } |
|
213 |
|
214 |
|
215 |
|
216 /** Modify the VFP FPSCR register |
|
217 Does nothing if there is no VFP.
|
218 |
|
219 @param aClearMask Mask of bits to clear (1 = clear this bit) |
|
220 @param aSetMask Mask of bits to set (1 = set this bit) |
|
221 @return The original value of FPSCR, 0 if no VFP present |
|
222 |
|
223 @publishedPartner |
|
224 @released |
|
225 */ |
|
226 EXPORT_C __NAKED__ TUint32 Arm::ModifyFpScr(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/) |
|
227 { |
|
228 #ifdef __CPU_HAS_VFP |
|
229 VFP_FMRX(, 2,VFP_XREG_FPSCR); |
|
230 asm("bic r0, r2, r0 "); |
|
231 asm("orr r0, r0, r1 "); |
|
232 VFP_FMXR(, VFP_XREG_FPSCR,0); |
|
233 asm("mov r0, r2 "); |
|
234 #else |
|
235 asm("mov r0, #0 "); |
|
236 #endif |
|
237 __JUMP(, lr); |
|
238 } |
|
239 |
|
240 |
|
241 /** Detect whether NEON is present |
|
242 |
|
243 @return ETrue if present, EFalse if not |
|
244 |
|
245 @internalTechnology |
|
246 @released |
|
247 */ |
|
248 #if defined(__CPU_HAS_VFP) && defined(__VFP_V3) |
|
249 __NAKED__ TBool Arm::NeonPresent() |
|
250 { |
|
251 asm("mov r0, #0 "); // Not present |
|
252 VFP_FMRX(, 1,VFP_XREG_FPEXC); // Save VFP state |
|
253 asm("orr r2, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN)); |
|
254 	VFP_FMXR(,	VFP_XREG_FPEXC,2);			// Enable VFP (write the value with the EN bit set, computed in r2 above)
|
255 |
|
256 VFP_FMRX(, 2,VFP_XREG_MVFR0); // Read MVFR0 |
|
257 asm("tst r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32)); // Check to see if all 32 Advanced SIMD registers are present |
|
258 asm("beq 0f "); // Skip ahead if not |
|
259 GET_CAR(, r2); |
|
260 asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_ASEDIS)); // Check to see if ASIMD is disabled |
|
261 asm("bne 0f "); // Skip ahead if so |
|
262 asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS)); // Check to see if the upper 16 registers are disabled |
|
263 asm("moveq r0, #1" ); // If not then eport NEON present |
|
264 |
|
265 asm("0: "); |
|
266 VFP_FMXR(,VFP_XREG_FPEXC,1); // Restore VFP state |
|
267 __JUMP(, lr); |
|
268 } |
|
269 #endif |
|
270 |
|
271 |
|
272 #ifdef __CPU_HAS_MMU |
|
273 __NAKED__ TBool Arm::MmuActive() |
|
274 { |
|
275 asm("mrc p15, 0, r0, c1, c0, 0 "); |
|
276 asm("and r0, r0, #1 "); |
|
277 __JUMP(, lr); |
|
278 } |
|
279 |
|
280 // Returns the content of Translation Table Base Register 0.

281 // To get the physical address of the level 1 table, on some platforms the low-order table walk cache attribute bits must be masked off (e.g. by ANDing with 0xFFFFC000).
|
282 __NAKED__ TUint32 Arm::MmuTTBR0() |
|
283 { |
|
284 asm("mrc p15, 0, r0, c2, c0, 0 "); |
|
285 __JUMP(, lr); |
|
286 } |
|
287 #endif |
|
288 |
|
289 |
|
290 |
|
291 /** Get the current value of the system timestamp |
|
292 |
|
293 @publishedPartner |
|
294 @prototype |
|
295 */ |
|
296 EXPORT_C __NAKED__ TUint64 NKern::Timestamp() |
|
297 { |
|
298 asm("ldr r3, __TheScheduler "); |
|
299 asm("mrs r12, cpsr "); // r12 = saved interrupt mask |
|
300 asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(TScheduler,i_LocalTimerAddr)); // r2 points to local timer |
|
301 __ASM_CLI(); // disable all interrupts |
|
302 GET_RWNO_TID(,r3); // r3 -> TSubScheduler |
|
303 asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount)); // r1 = current timer counter |
|
304 asm("ldr r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet)); // r0 = last value written to timer counter |
|
305 asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI)); // r2 = scaling factor |
|
306 asm("sub r0, r0, r1 "); // elapsed timer ticks since last timestamp sync |
|
307 asm("umull r1, r2, r0, r2 "); // r2:r1 = elapsed ticks * scaling factor |
|
308 asm("ldr r0, [r3, #%a0]!" : : "i" _FOFF(TSubScheduler,iLastTimestamp64)); // r0 = last timestamp sync point, low word |
|
309 asm("ldr r3, [r3, #4] "); // r3 = last timestamp sync point, high word |
|
310 asm("adds r1, r1, #0x00800000 "); // add 2^23 (rounding) |
|
311 asm("adcs r2, r2, #0 "); |
|
312 asm("mov r1, r1, lsr #24 "); // divide by 2^24 |
|
313 asm("orr r1, r1, r2, lsl #8 "); // r1 = elapsed time since last timestamp sync |
|
314 asm("msr cpsr, r12 "); // restore interrupts |
|
315 asm("adds r0, r0, r1 "); // r1:r0 = last timestamp sync point + elapsed time since last timestamp sync |
|
316 asm("adcs r1, r3, #0 "); |
|
317 __JUMP(,lr); |
|
318 asm("__TheScheduler: "); |
|
319 asm(".word %a0" : : "i" ((TInt)&TheScheduler)); |
|
320 } |
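
/* The arithmetic above is fixed-point: i_TimerMultI is a per-CPU multiplier with
   24 fractional bits converting local timer ticks to timestamp units. A sketch of
   the same calculation in C (illustrative only; field names as used above, and the
   local timer counts down from i_LastTimerSet):

	TUint64 TimestampSketch()
		{
		TUint32 ticks = i_LastTimerSet - iTimerCount;	// ticks elapsed since last sync
		TUint64 delta = (TUint64)ticks * i_TimerMultI;	// 32x32 -> 64 bit (UMULL)
		delta = (delta + (1u<<23)) >> 24;				// round, then divide by 2^24
		return iLastTimestamp64 + delta;				// add to last sync point
		}
*/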
|
321 |
|
322 |
|
323 extern "C" __NAKED__ TLinAddr get_sp_svc() |
|
324 { |
|
325 asm("mrs r1, cpsr "); |
|
326 __ASM_CLI_MODE(MODE_SVC); |
|
327 asm("mov r0, sp "); |
|
328 asm("msr cpsr, r1 "); |
|
329 __JUMP(, lr); |
|
330 } |
|
331 |
|
332 extern "C" __NAKED__ TLinAddr get_lr_svc() |
|
333 { |
|
334 asm("mrs r1, cpsr "); |
|
335 __ASM_CLI_MODE(MODE_SVC); |
|
336 asm("mov r0, lr "); |
|
337 asm("msr cpsr, r1 "); |
|
338 __JUMP(, lr); |
|
339 } |
|
340 |
|
341 |
|
342 /** Get the return address from an ISR |
|
343 |
|
344 Call only from an ISR |
|
345 |
|
346 @internalTechnology |
|
347 */ |
|
348 EXPORT_C __NAKED__ TLinAddr Arm::IrqReturnAddress() |
|
349 { |
|
350 asm("mrs r1, cpsr "); |
|
351 __ASM_CLI(); |
|
352 asm("and r0, r1, #0x1f "); |
|
353 asm("cmp r0, #0x11 "); // mode_fiq ? |
|
354 asm("beq 1f "); |
|
355 __ASM_CLI_MODE(MODE_SVC); |
|
356 asm("ldr r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iR15)); |
|
357 asm("msr cpsr, r1 "); |
|
358 __JUMP(, lr); |
|
359 |
|
360 asm("1: "); |
|
361 GET_RWNO_TID(,r3); |
|
362 asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,i_FiqStackTop)); // if so, r2->top of FIQ stack |
|
363 asm("ldr r0, [r2, #-4] "); // get return address |
|
364 asm("msr cpsr, r1 "); |
|
365 __JUMP(, lr); |
|
366 } |
|
367 |
|
368 #if defined(__INCLUDE_SPIN_LOCK_CHECKS__) |
|
369 #define __ASM_CALL(func) \ |
|
370 asm("str lr, [sp, #-4]! "); \ |
|
371 asm("bl " CSM_CFUNC(func)); \ |
|
372 asm("ldr lr, [sp], #4 "); |
|
373 |
|
374 #define SPIN_LOCK_ENTRY_CHECK() __ASM_CALL(spin_lock_entry_check) |
|
375 #define SPIN_LOCK_MARK_ACQ() __ASM_CALL(spin_lock_mark_acq) |
|
376 #define SPIN_UNLOCK_ENTRY_CHECK() __ASM_CALL(spin_unlock_entry_check) |
|
377 |
|
378 #define RWSPIN_RLOCK_ENTRY_CHECK() __ASM_CALL(rwspin_rlock_entry_check) |
|
379 #define RWSPIN_RLOCK_MARK_ACQ() __ASM_CALL(rwspin_rlock_mark_acq) |
|
380 #define RWSPIN_RUNLOCK_ENTRY_CHECK() __ASM_CALL(rwspin_runlock_entry_check) |
|
381 |
|
382 #define RWSPIN_WLOCK_ENTRY_CHECK() __ASM_CALL(rwspin_wlock_entry_check) |
|
383 #define RWSPIN_WLOCK_MARK_ACQ() __ASM_CALL(rwspin_wlock_mark_acq) |
|
384 #define RWSPIN_WUNLOCK_ENTRY_CHECK() __ASM_CALL(rwspin_wunlock_entry_check) |
|
385 |
|
386 #else |
|
387 #define SPIN_LOCK_ENTRY_CHECK() |
|
388 #define SPIN_LOCK_MARK_ACQ() |
|
389 #define SPIN_UNLOCK_ENTRY_CHECK() |
|
390 |
|
391 #define RWSPIN_RLOCK_ENTRY_CHECK() |
|
392 #define RWSPIN_RLOCK_MARK_ACQ() |
|
393 #define RWSPIN_RUNLOCK_ENTRY_CHECK() |
|
394 |
|
395 #define RWSPIN_WLOCK_ENTRY_CHECK() |
|
396 #define RWSPIN_WLOCK_MARK_ACQ() |
|
397 #define RWSPIN_WUNLOCK_ENTRY_CHECK() |
|
398 |
|
399 #endif |
|
400 |
|
401 |
|
402 /****************************************************************************** |
|
403 * Spin locks |
|
404 * |
|
405 * [this+0] in count (byte) |
|
406 * [this+1] out count (byte) |
|
407 * [this+6] order (byte) |
|
408 * [this+7] holding CPU (byte) |
|
409 ******************************************************************************/ |
|
410 |
|
411 #if defined(__INCLUDE_SPIN_LOCK_CHECKS__) |
|
412 extern "C" __NAKED__ void spin_lock_entry_check() |
|
413 { |
|
414 /* R0 points to lock */ |
|
415 asm("stmfd sp!, {r1,r2,r3,r12} "); |
|
416 asm("mrs r12, cpsr "); |
|
417 __ASM_CLI(); |
|
418 GET_RWNO_TID(, r1); /* R1->SubScheduler */ |
|
419 asm("cmp r1, #0 "); |
|
420 asm("beq slec_ok "); /* Skip checks if subscheduler not yet initialised */ |
|
421 asm("ldrh r2, [r0, #6] "); /* R2[8:15]=holding CPU, R2[0:7]=order */ |
|
422 asm("tst r2, #0xE0 "); |
|
423 asm("bne slec_preemption "); /* This lock requires preemption to be disabled */ |
|
424 |
|
425 	/* Check that interrupts are disabled: if interrupts/preemption are not disabled

426 	   there is a risk of a same-core deadlock occurring, hence this check and

427 	   run-time assert to ensure the code stays safe */
|
428 asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask)); |
|
429 asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask)); /* Check interrupts masked */ |
|
430 asm("beq slec_1 "); /* Yes - OK */ |
|
431 __ASM_CRASH(); /* No - die */ |
|
432 |
|
433 asm("slec_preemption: "); |
|
434 asm("and r3, r2, #0xFF "); |
|
435 asm("cmp r3, #0xFF "); /* check for EOrderNone */ |
|
436 asm("beq slec_1 "); /* EOrderNone - don't check interrupts or preemption */ |
|
437 asm("and r3, r12, #0x1F "); |
|
438 asm("cmp r3, #0x13 "); /* Make sure we're in mode_svc */ |
|
439 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount)); |
|
440 asm("bne slec_preemption_die "); /* If not, die */ |
|
441 asm("cmp r3, #0 "); |
|
442 asm("bne slec_1 "); /* Preemption disabled - OK */ |
|
443 asm("slec_preemption_die: "); |
|
444 __ASM_CRASH(); /* Preemption enabled - die */ |
|
445 |
|
446 asm("slec_1: "); |
|
447 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum)); |
|
448 asm("cmp r3, r2, lsr #8 "); /* Test if held by current CPU */ |
|
449 asm("bne slec_2 "); /* Not already held by this CPU - OK */ |
|
450 __ASM_CRASH(); /* Already held by this CPU - die */ |
|
451 |
|
452 asm("slec_2: "); |
|
453 asm("ldr r3, [r1, #%a0]!" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck)); |
|
454 asm("ldr r1, [r1, #4] "); /* r3=low word of iSpinLockOrderCheck, r1=high word */ |
|
455 asm("cmp r3, #0 "); |
|
456 asm("addeq r2, r2, #0x20000000 "); /* if low word zero, add 32 to LS1 index ... */ |
|
457 asm("moveq r3, r1 "); /* ... and r3=high word ... */ |
|
458 asm("subs r1, r3, #1 "); /* R1 = R3 with all bits up to and including LS1 flipped */ |
|
459 asm("beq slec_ok "); /* If all bits zero, no locks held so OK */ |
|
460 asm("eor r3, r3, r1 "); /* Clear all bits above LS1 */ |
|
461 CLZ(1,3); /* R1 = 31 - bit number of LS1 */ |
|
462 asm("rsb r1, r1, #31 "); /* R1 = bit number of LS1 */ |
|
463 asm("add r1, r1, r2, lsr #24 "); /* add 32 if we were looking at high word */ |
|
464 asm("mov r2, r2, lsl #24 "); /* this lock's order value into R2 high byte */ |
|
465 asm("cmp r1, r2, asr #24 "); /* compare current lowest order lock to sign-extended order value */ |
|
466 asm("bgt slec_ok "); /* if this lock's order < current lowest, OK */ |
|
467 __ASM_CRASH(); /* otherwise die */ |
|
468 |
|
469 asm("slec_ok: "); |
|
470 asm("msr cpsr, r12 "); /* restore interrupts */ |
|
471 asm("ldmfd sp!, {r1,r2,r3,r12} "); |
|
472 __JUMP(,lr); |
|
473 } |
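
/* The order check above enforces a simple invariant, shown here as a C sketch
   (illustrative only; Crash() stands for __ASM_CRASH and LowestSetBit() is a
   hypothetical helper): a spin lock may only be acquired if its order value is
   strictly lower than that of every spin lock already held by this CPU, so locks
   are always taken in decreasing order and deadlock through inconsistent lock
   ordering between CPUs is impossible.

	void OrderCheck(TUint64 aHeldMask, TInt aOrder)		// aHeldMask = iSpinLockOrderCheck
		{
		if (aOrder >= 0x40)								// EOrderNone etc. - not checked
			return;
		if (aHeldMask == 0)								// no spin locks held - OK
			return;
		TInt lowestHeld = LowestSetBit(aHeldMask);		// bit number of least significant 1
		if (aOrder >= lowestHeld)
			Crash();									// order violation - die
		}
*/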
|
474 |
|
475 extern "C" __NAKED__ void spin_lock_mark_acq() |
|
476 { |
|
477 /* R0 points to lock */ |
|
478 asm("stmfd sp!, {r1,r2,r3,r12} "); |
|
479 asm("mrs r12, cpsr "); |
|
480 __ASM_CLI(); |
|
481 GET_RWNO_TID(, r1); /* R1->SubScheduler */ |
|
482 asm("cmp r1, #0 "); |
|
483 asm("beq slma_ok "); /* Skip checks if subscheduler not yet initialised */ |
|
484 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum)); |
|
485 asm("ldrb r2, [r0, #6] "); /* R2 = lock order value */ |
|
486 asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck)); |
|
487 asm("strb r3, [r0, #7] "); /* set byte 7 to holding CPU number */ |
|
488 asm("cmp r2, #0x40 "); |
|
489 asm("bhs slma_ok "); /* if EOrderNone, done */ |
|
490 asm("cmp r2, #0x20 "); |
|
491 asm("addhs r1, r1, #4 "); |
|
492 asm("and r2, r2, #0x1f "); |
|
493 asm("mov r3, #1 "); |
|
494 asm("mov r3, r3, lsl r2 "); /* r3 = bit to set */ |
|
495 asm("ldr r2, [r1] "); |
|
496 asm("orr r2, r2, r3 "); |
|
497 asm("str r2, [r1] "); /* set bit in iSpinLockOrderCheck corresponding to lock order */ |
|
498 |
|
499 asm("slma_ok: "); |
|
500 asm("msr cpsr, r12 "); /* restore interrupts */ |
|
501 asm("ldmfd sp!, {r1,r2,r3,r12} "); |
|
502 __JUMP(,lr); |
|
503 } |
|
504 |
|
505 extern "C" __NAKED__ void spin_unlock_entry_check() |
|
506 { |
|
507 /* R0 points to lock */ |
|
508 asm("stmfd sp!, {r1,r2,r3,r12} "); |
|
509 asm("mrs r12, cpsr "); |
|
510 __ASM_CLI(); |
|
511 GET_RWNO_TID(, r1); /* R1->SubScheduler */ |
|
512 asm("cmp r1, #0 "); |
|
513 asm("beq suec_ok "); /* Skip checks if subscheduler not yet initialised */ |
|
514 asm("ldrh r2, [r0, #6] "); /* R2[8:15]=holding CPU, R2[0:7]=order */ |
|
515 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum)); |
|
516 asm("eor r2, r2, r3, lsl #8 "); /* R2[8:15]=holding CPU^current CPU, R2[0:7]=order */ |
|
517 asm("tst r2, #0xE0 "); |
|
518 asm("bne suec_preemption "); /* This lock requires preemption to be disabled */ |
|
519 |
|
520 /* check interrupts disabled */ |
|
521 asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask)); |
|
522 asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask)); /* Check interrupts masked */ |
|
523 asm("beq suec_1 "); /* Yes - OK */ |
|
524 __ASM_CRASH(); /* No - die */ |
|
525 |
|
526 asm("suec_preemption: "); |
|
527 asm("and r3, r2, #0xFF "); |
|
528 asm("cmp r3, #0xFF "); /* check for EOrderNone */ |
|
529 asm("ldrne r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount)); |
|
530 asm("beq suec_1 "); /* EOrderNone - don't check interrupts or preemption */ |
|
531 asm("cmp r3, #0 "); |
|
532 asm("bne suec_1 "); /* Preemption disabled - OK */ |
|
533 __ASM_CRASH(); /* Preemption enabled - die */ |
|
534 |
|
535 asm("suec_1: "); |
|
536 asm("tst r2, #0xFF00 "); /* Check if holding CPU ^ current CPU number == 0 */ |
|
537 asm("beq suec_2 "); /* Held by this CPU - OK */ |
|
538 __ASM_CRASH(); /* Not held by this CPU - die */ |
|
539 |
|
540 asm("suec_2: "); |
|
541 asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck)); |
|
542 asm("mov r3, #0xFF "); |
|
543 asm("strb r3, [r0, #7] "); /* reset holding CPU */ |
|
544 asm("cmp r2, #0x40 "); |
|
545 asm("bhs suec_ok "); /* if EOrderNone, done */ |
|
546 asm("cmp r2, #0x20 "); |
|
547 asm("addhs r1, r1, #4 "); |
|
548 asm("and r2, r2, #0x1F "); |
|
549 asm("mov r3, #1 "); |
|
550 asm("mov r3, r3, lsl r2 "); /* r3 = bit to clear */ |
|
551 asm("ldr r2, [r1] "); |
|
552 asm("tst r2, r3 "); /* test bit originally set */ |
|
553 asm("bic r2, r2, r3 "); |
|
554 asm("str r2, [r1] "); /* clear bit in iSpinLockOrderCheck corresponding to lock order */ |
|
555 asm("bne suec_ok "); /* if originally set, OK */ |
|
556 __ASM_CRASH(); /* if not, die - something must have got corrupted */ |
|
557 |
|
558 asm("suec_ok: "); |
|
559 asm("msr cpsr, r12 "); /* restore interrupts */ |
|
560 asm("ldmfd sp!, {r1,r2,r3,r12} "); |
|
561 __JUMP(,lr); |
|
562 } |
|
563 #endif |
|
564 |
|
565 |
|
566 /****************************************************************************** |
|
567 * Plain old spin lock |
|
568 * |
|
569 * Fundamental algorithm: |
|
570 * lock() { old_in = in++; while(out!=old_in) __chill(); } |
|
571 * unlock() { ++out; } |
|
572 * |
|
573 * [this+0] out count (byte) |
|
574 * [this+1] in count (byte) |
|
575 * |
|
576 ******************************************************************************/ |
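
/* The algorithm above is a standard ticket lock. A portable C++ sketch of the
   same idea (illustrative only, assuming C++11 <atomic> and the usual TUint8
   typedef; the real code below uses LDREXH/STREXH on the packed in/out byte
   pair, with WFE to wait and SEV to wake):

	struct TTicketLockSketch
		{
		std::atomic<TUint8> iOut;						// [this+0] next ticket to be served
		std::atomic<TUint8> iIn;						// [this+1] next ticket to hand out

		void Lock()
			{
			TUint8 myTicket = iIn.fetch_add(1);			// old_in = in++
			while (iOut.load(std::memory_order_acquire) != myTicket)
				{ }										// __chill() / WFE until our turn
			}
		void Unlock()
			{											// only the holder writes iOut
			iOut.store(TUint8(iOut.load(std::memory_order_relaxed) + 1),
					   std::memory_order_release);		// ++out, admits the next waiter
			}
		};
*/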
|
577 __NAKED__ EXPORT_C void TSpinLock::LockIrq() |
|
578 { |
|
579 __ASM_CLI(); /* Disable interrupts */ |
|
580 SPIN_LOCK_ENTRY_CHECK() |
|
581 asm("1: "); |
|
582 LDREXH(1,0); |
|
583 asm("mov r2, r1, lsr #8 "); /* R2 = original in count */ |
|
584 asm("add r1, r1, #0x100 "); |
|
585 STREXH(3,1,0); |
|
586 asm("cmp r3, #0 "); |
|
587 asm("bne 1b "); |
|
588 asm("and r1, r1, #0xFF "); /* R1 = out count */ |
|
589 asm("3: "); |
|
590 asm("cmp r2, r1 "); /* out = original in ? */ |
|
591 asm("bne 2f "); /* no - must wait */ |
|
592 SPIN_LOCK_MARK_ACQ() |
|
593 __DATA_MEMORY_BARRIER__(r3); /* we have got the lock */ |
|
594 __JUMP(,lr); |
|
595 |
|
596 asm("2: "); |
|
597 ARM_WFE; |
|
598 asm("ldrb r1, [r0, #0] "); /* read out count again */ |
|
599 asm("b 3b "); |
|
600 } |
|
601 |
|
602 __NAKED__ EXPORT_C void TSpinLock::UnlockIrq() |
|
603 { |
|
604 SPIN_UNLOCK_ENTRY_CHECK() |
|
605 __DATA_MEMORY_BARRIER_Z__(r1); /* Ensure accesses don't move outside locked section */ |
|
606 asm("ldrb r2, [r0, #0] "); |
|
607 asm("add r2, r2, #1 "); |
|
608 asm("strb r2, [r0, #0] "); /* ++out */ |
|
609 __DATA_SYNC_BARRIER__(r1); /* Ensure write to out completes before SEV */ |
|
610 ARM_SEV; /* Wake up any waiting processors */ |
|
611 __ASM_STI(); /* Enable interrupts */ |
|
612 __JUMP(,lr); |
|
613 } |
|
614 |
|
615 __NAKED__ EXPORT_C TBool TSpinLock::FlashIrq() |
|
616 { |
|
617 GET_RWNO_TID(,r12); /* r12 -> TSubScheduler */ |
|
618 asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr)); |
|
619 asm("ldrh r1, [r0, #0] "); |
|
620 asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending)); |
|
621 asm("sub r1, r1, r1, lsr #8 "); /* r1 low byte = (out - in) mod 256 */ |
|
622 asm("and r1, r1, #0xFF "); |
|
623 asm("cmp r1, #0xFF "); /* if out - in = -1, no-one else waiting */ |
|
624 asm("addeq r3, r3, #1 "); |
|
625 asm("cmpeq r3, #1024 "); /* if no-one waiting for lock, check for pending interrupt */ |
|
626 asm("bne 1f "); /* branch if someone else waiting */ |
|
627 asm("mov r0, #0 "); /* else return FALSE */ |
|
628 __JUMP(,lr); |
|
629 |
|
630 asm("1: "); |
|
631 asm("str lr, [sp, #-4]! "); |
|
632 asm("bl " CSM_ZN9TSpinLock9UnlockIrqEv); |
|
633 asm("bl " CSM_ZN9TSpinLock7LockIrqEv); |
|
634 asm("mov r0, #1 "); |
|
635 asm("ldr pc, [sp], #4 "); |
|
636 } |
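
/* All of the Flash... variants follow the same pattern: if another CPU is queued
   for the lock (in != out+1) or, for the IRQ variants, an interrupt is pending on
   this CPU, briefly release and immediately re-acquire the lock so the waiter or
   the interrupt can get in; otherwise do nothing. In outline (illustrative only;
   ContendedOrIrqPending() is a stand-in for the checks above):

	TBool FlashSketch()
		{
		if (!ContendedOrIrqPending())
			return FALSE;								// nothing waiting - keep the lock
		Unlock();										// let the waiter / interrupt in
		Lock();											// take the lock back
		return TRUE;
		}
*/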
|
637 |
|
638 |
|
639 __NAKED__ EXPORT_C void TSpinLock::LockOnly() |
|
640 { |
|
641 SPIN_LOCK_ENTRY_CHECK() |
|
642 asm("1: "); |
|
643 LDREXH(1,0); |
|
644 asm("mov r2, r1, lsr #8 "); /* R2 = original in count */ |
|
645 asm("add r1, r1, #0x100 "); |
|
646 STREXH(3,1,0); |
|
647 asm("cmp r3, #0 "); |
|
648 asm("bne 1b "); |
|
649 asm("and r1, r1, #0xFF "); /* R1 = out count */ |
|
650 asm("3: "); |
|
651 asm("cmp r2, r1 "); /* out = original in ? */ |
|
652 asm("bne 2f "); /* no - must wait */ |
|
653 SPIN_LOCK_MARK_ACQ() |
|
654 __DATA_MEMORY_BARRIER__(r3); /* we have got the lock */ |
|
655 __JUMP(,lr); |
|
656 |
|
657 asm("2: "); |
|
658 ARM_WFE; |
|
659 asm("ldrb r1, [r0, #0] "); /* read out count again */ |
|
660 asm("b 3b "); |
|
661 } |
|
662 |
|
663 __NAKED__ EXPORT_C void TSpinLock::UnlockOnly() |
|
664 { |
|
665 SPIN_UNLOCK_ENTRY_CHECK() |
|
666 __DATA_MEMORY_BARRIER_Z__(r1); /* Ensure accesses don't move outside locked section */ |
|
667 asm("ldrb r2, [r0, #0] "); |
|
668 asm("add r2, r2, #1 "); |
|
669 asm("strb r2, [r0, #0] "); /* ++out */ |
|
670 __DATA_SYNC_BARRIER__(r1); /* Ensure write to out completes before SEV */ |
|
671 ARM_SEV; /* Wake up any waiting processors */ |
|
672 __JUMP(,lr); |
|
673 } |
|
674 |
|
675 __NAKED__ EXPORT_C TBool TSpinLock::FlashOnly() |
|
676 { |
|
677 asm("ldrh r1, [r0, #0] "); |
|
678 asm("sub r1, r1, r1, lsr #8 "); /* r1 low byte = (out - in) mod 256 */ |
|
679 asm("and r1, r1, #0xFF "); |
|
680 asm("cmp r1, #0xFF "); /* if out - in = -1, no-one else waiting */ |
|
681 asm("bne 1f "); /* branch if someone else waiting */ |
|
682 asm("mov r0, #0 "); /* else return FALSE */ |
|
683 __JUMP(,lr); |
|
684 |
|
685 asm("1: "); |
|
686 asm("str lr, [sp, #-4]! "); |
|
687 asm("bl " CSM_ZN9TSpinLock10UnlockOnlyEv); |
|
688 asm("bl " CSM_ZN9TSpinLock8LockOnlyEv); |
|
689 asm("mov r0, #1 "); |
|
690 asm("ldr pc, [sp], #4 "); |
|
691 } |
|
692 |
|
693 |
|
694 __NAKED__ EXPORT_C TInt TSpinLock::LockIrqSave() |
|
695 { |
|
696 asm("mrs r12, cpsr "); |
|
697 __ASM_CLI(); /* Disable interrupts */ |
|
698 SPIN_LOCK_ENTRY_CHECK() |
|
699 asm("1: "); |
|
700 LDREXH(1,0); |
|
701 asm("mov r2, r1, lsr #8 "); /* R2 = original in count */ |
|
702 asm("add r1, r1, #0x100 "); |
|
703 STREXH(3,1,0); |
|
704 asm("cmp r3, #0 "); |
|
705 asm("bne 1b "); |
|
706 asm("and r1, r1, #0xFF "); /* R1 = out count */ |
|
707 asm("3: "); |
|
708 asm("cmp r2, r1 "); /* out = original in ? */ |
|
709 asm("bne 2f "); /* no - must wait */ |
|
710 SPIN_LOCK_MARK_ACQ() |
|
711 __DATA_MEMORY_BARRIER__(r3); /* we have got the lock */ |
|
712 asm("and r0, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask)); /* return original CPSR I and F bits */ |
|
713 __JUMP(,lr); |
|
714 |
|
715 asm("2: "); |
|
716 ARM_WFE; |
|
717 asm("ldrb r1, [r0, #0] "); /* read out count again */ |
|
718 asm("b 3b "); |
|
719 } |
|
720 |
|
721 __NAKED__ EXPORT_C void TSpinLock::UnlockIrqRestore(TInt) |
|
722 { |
|
723 SPIN_UNLOCK_ENTRY_CHECK() |
|
724 __DATA_MEMORY_BARRIER_Z__(r3); /* Ensure accesses don't move outside locked section */ |
|
725 asm("ldrb r2, [r0, #0] "); |
|
726 asm("mrs r12, cpsr "); |
|
727 asm("add r2, r2, #1 "); |
|
728 asm("bic r12, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask)); |
|
729 asm("strb r2, [r0, #0] "); /* ++out */ |
|
730 __DATA_SYNC_BARRIER__(r3); /* Ensure write to out completes before SEV */ |
|
731 ARM_SEV; /* Wake up any waiting processors */ |
|
732 asm("orr r1, r1, r12 "); |
|
733 asm("msr cpsr, r1 "); /* restore interrupts */ |
|
734 __JUMP(,lr); |
|
735 } |
|
736 |
|
737 __NAKED__ EXPORT_C TBool TSpinLock::FlashIrqRestore(TInt) |
|
738 { |
|
739 GET_RWNO_TID(,r12); /* r12 -> TSubScheduler */ |
|
740 asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr)); |
|
741 asm("ldrh r2, [r0, #0] "); |
|
742 asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending)); |
|
743 asm("sub r2, r2, r2, lsr #8 "); /* r2 low byte = (out - in) mod 256 */ |
|
744 asm("and r2, r2, #0xFF "); |
|
745 asm("cmp r2, #0xFF "); /* if out - in = -1, no-one else waiting */ |
|
746 asm("addeq r3, r3, #1 "); |
|
747 asm("cmpeq r3, #1024 "); /* if no-one waiting for lock, check for pending interrupt */ |
|
748 asm("bne 1f "); /* branch if someone else waiting */ |
|
749 asm("mov r0, #0 "); /* else return FALSE */ |
|
750 __JUMP(,lr); |
|
751 |
|
752 asm("1: "); |
|
753 asm("str lr, [sp, #-4]! "); |
|
754 asm("bl " CSM_ZN9TSpinLock16UnlockIrqRestoreEi); |
|
755 asm("bl " CSM_ZN9TSpinLock7LockIrqEv); |
|
756 asm("mov r0, #1 "); |
|
757 asm("ldr pc, [sp], #4 "); |
|
758 } |
|
759 |
|
760 |
|
761 __NAKED__ EXPORT_C TBool TSpinLock::FlashPreempt() |
|
762 { |
|
763 asm("ldrh r2, [r0, #0] "); |
|
764 GET_RWNO_TID(,r12); /* r12 -> TSubScheduler */ |
|
765 asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag)); |
|
766 asm("sub r2, r2, r2, lsr #8 "); /* r2 low byte = (out - in) mod 256 */ |
|
767 asm("and r2, r2, #0xFF "); |
|
768 asm("cmp r2, #0xFF "); /* if out - in = -1, no-one else waiting */ |
|
769 asm("cmpeq r3, #0 "); /* if no-one else waiting, check if reschedule or IDFCs pending */ |
|
770 asm("bne 1f "); /* if so or someone else waiting, branch to release lock */ |
|
771 asm("mov r0, #0 "); /* else return FALSE */ |
|
772 __JUMP(,lr); |
|
773 |
|
774 asm("1: "); |
|
775 asm("stmfd sp!, {r0,lr} "); |
|
776 asm("bl " CSM_ZN9TSpinLock10UnlockOnlyEv); |
|
777 asm("bl " CSM_ZN5NKern15PreemptionPointEv); |
|
778 asm("ldr r0, [sp], #4 "); |
|
779 asm("bl " CSM_ZN9TSpinLock8LockOnlyEv); |
|
780 asm("mov r0, #1 "); |
|
781 asm("ldr pc, [sp], #4 "); |
|
782 } |
|
783 |
|
784 |
|
785 /****************************************************************************** |
|
786 * Read/Write Spin lock |
|
787 * |
|
788 * Structure ( (in.r,in.w) , (out.r,out.w) ) |
|
789 * Fundamental algorithm: |
|
790 * lockr() { old_in = (in.r++,in.w); while(out.w!=old_in.w) __chill(); } |
|
791 * unlockr() { ++out.r; } |
|
792 * lockw() { old_in = (in.r,in.w++); while(out!=old_in) __chill(); } |
|
793 * unlockw() { ++out.w; } |
|
794 * |
|
795 * [this+0] in.w |
|
796 * [this+1] in.r |
|
797 * [this+2] out.w |
|
798 * [this+3] out.r |
|
799 * [this+4] Bit mask of CPUs which hold read locks |
|
800 * [this+6] order value |
|
801 * [this+7] CPU number which holds write lock, 0xFF if none |
|
802 * |
|
803 ******************************************************************************/ |
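
/* The algorithm above is a read/write ticket lock. A portable C++ sketch
   (illustrative only, assuming C++11 <atomic>; byte wraparound correction is
   omitted here - the real code below handles it - and, as in the real code, the
   four byte counters are packed into one word so taking a ticket and
   snapshotting the other counters is a single atomic update):

	struct TRWTicketSketch
		{
		std::atomic<TUint32> iState;	// bits 0-7 in.w, 8-15 in.r, 16-23 out.w, 24-31 out.r

		void LockR()
			{
			TUint32 s = iState.load(std::memory_order_relaxed);
			while (!iState.compare_exchange_weak(s, s + 0x100))
				{ }										// in.r++, s = snapshot before increment
			while (((iState.load(std::memory_order_acquire) >> 16) & 0xFF) != (s & 0xFF))
				{ }										// wait until out.w == snapshotted in.w
			}
		void UnlockR()
			{ iState.fetch_add(0x01000000u, std::memory_order_release); }	// ++out.r

		void LockW()
			{
			TUint32 s = iState.load(std::memory_order_relaxed);
			while (!iState.compare_exchange_weak(s, s + 1))
				{ }										// in.w++, s = snapshot before increment
			while ((iState.load(std::memory_order_acquire) >> 16) != (s & 0xFFFF))
				{ }										// wait until (out.w,out.r) == snapshotted (in.w,in.r)
			}
		void UnlockW()
			{ iState.fetch_add(0x00010000u, std::memory_order_release); }	// ++out.w
		};
*/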
|
804 |
|
805 #if defined(__INCLUDE_SPIN_LOCK_CHECKS__) |
|
806 extern "C" __NAKED__ void rwspin_rlock_entry_check() |
|
807 { |
|
808 /* R0 points to lock */ |
|
809 asm("stmfd sp!, {r1,r2,r3,r12} "); |
|
810 asm("mrs r12, cpsr "); |
|
811 __ASM_CLI(); /* Disable interrupts */ |
|
812 GET_RWNO_TID(, r1); /* R1->SubScheduler */ |
|
813 asm("cmp r1, #0 "); |
|
814 asm("beq rwrlec_ok "); /* Skip checks if subscheduler not yet initialised */ |
|
815 asm("ldr r2, [r0, #4] "); /* R2[24:31]=wcpu, R2[16:23]=order, R2[0:7]=rcpu mask */ |
|
816 asm("tst r2, #0x00E00000 "); |
|
817 asm("bne rwrlec_preemption "); /* This lock requires preemption to be disabled */ |
|
818 |
|
819 /* check interrupts disabled */ |
|
820 asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask)); |
|
821 asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask)); /* Check interrupts masked */ |
|
822 asm("beq rwrlec_1 "); /* Yes - OK */ |
|
823 __ASM_CRASH(); /* No - die */ |
|
824 |
|
825 asm("rwrlec_preemption: "); |
|
826 asm("and r3, r2, #0x00FF0000 "); |
|
827 asm("cmp r3, #0x00FF0000 "); /* check for EOrderNone */ |
|
828 asm("beq rwrlec_1 "); /* EOrderNone - don't check interrupts or preemption */ |
|
829 asm("and r3, r12, #0x1F "); |
|
830 asm("cmp r3, #0x13 "); /* Make sure we're in mode_svc */ |
|
831 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount)); |
|
832 asm("bne rwrlec_preemption_die "); /* If not, die */ |
|
833 asm("cmp r3, #0 "); |
|
834 asm("bne rwrlec_1 "); /* Preemption disabled - OK */ |
|
835 asm("rwrlec_preemption_die: "); |
|
836 __ASM_CRASH(); /* Preemption enabled - die */ |
|
837 |
|
838 asm("rwrlec_1: "); |
|
839 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum)); |
|
840 asm("eor r3, r2, r3, lsl #24 "); |
|
841 asm("cmp r3, #0x01000000 "); /* Held by current CPU for write ? */ |
|
842 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask)); |
|
843 asm("bhs rwrlec_2 "); /* No - OK */ |
|
844 __ASM_CRASH(); /* Already held by this CPU for write - die */ |
|
845 |
|
846 asm("rwrlec_2: "); |
|
847 asm("tst r2, r3 "); /* Held by current CPU for read ? */ |
|
848 asm("beq rwrlec_3 "); /* No - OK */ |
|
849 __ASM_CRASH(); /* Already held by this CPU for read - die */ |
|
850 |
|
851 asm("rwrlec_3: "); |
|
852 asm("ldr r3, [r1, #%a0]!" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck)); |
|
853 asm("mov r2, r2, lsr #16 "); |
|
854 asm("and r2, r2, #0xFF "); /* r2 = lock order */ |
|
855 asm("ldr r1, [r1, #4] "); /* r3=low word of iSpinLockOrderCheck, r1=high word */ |
|
856 asm("cmp r3, #0 "); |
|
857 asm("addeq r2, r2, #0x20000000 "); /* if low word zero, add 32 to LS1 index ... */ |
|
858 asm("moveq r3, r1 "); /* ... and r3=high word ... */ |
|
859 asm("subs r1, r3, #1 "); /* R1 = R3 with all bits up to and including LS1 flipped */ |
|
860 asm("beq rwrlec_ok "); /* If all bits zero, no locks held so OK */ |
|
861 asm("eor r3, r3, r1 "); /* Clear all bits above LS1 */ |
|
862 CLZ(1,3); /* R1 = 31 - bit number of LS1 */ |
|
863 asm("rsb r1, r1, #31 "); /* R1 = bit number of LS1 */ |
|
864 asm("add r1, r1, r2, lsr #24 "); /* add 32 if we were looking at high word */ |
|
865 asm("mov r2, r2, lsl #24 "); /* this lock's order value into R2 high byte */ |
|
866 asm("cmp r1, r2, asr #24 "); /* compare current lowest order lock to sign-extended order value */ |
|
867 asm("bgt rwrlec_ok "); /* if this lock's order < current lowest, OK */ |
|
868 __ASM_CRASH(); /* otherwise die */ |
|
869 |
|
870 asm("rwrlec_ok: "); |
|
871 asm("msr cpsr, r12 "); /* restore interrupts */ |
|
872 asm("ldmfd sp!, {r1,r2,r3,r12} "); |
|
873 __JUMP(,lr); |
|
874 } |
|
875 |
|
876 extern "C" __NAKED__ void rwspin_rlock_mark_acq() |
|
877 { |
|
878 /* R0 points to lock */ |
|
879 asm("stmfd sp!, {r1-r4,r12} "); |
|
880 asm("mrs r12, cpsr "); |
|
881 __ASM_CLI(); /* Disable interrupts */ |
|
882 GET_RWNO_TID(, r1); /* R1->SubScheduler */ |
|
883 asm("cmp r1, #0 "); |
|
884 asm("beq rwrlma_ok "); /* Skip checks if subscheduler not yet initialised */ |
|
885 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask)); |
|
886 asm("add r0, r0, #4 "); |
|
887 asm("1: "); |
|
888 LDREXB(2,0); /* rcpu mask */ |
|
889 asm("orr r2, r2, r3 "); /* set bit corresponding to current CPU */ |
|
890 STREXB(4,2,0); |
|
891 asm("cmp r4, #0 "); |
|
892 asm("bne 1b "); |
|
893 asm("ldrb r2, [r0, #2] "); /* R2 = lock order value */ |
|
894 asm("sub r0, r0, #4 "); |
|
895 asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck)); |
|
896 asm("cmp r2, #0x40 "); |
|
897 asm("bhs rwrlma_ok "); /* if EOrderNone, done */ |
|
898 asm("cmp r2, #0x20 "); |
|
899 asm("addhs r1, r1, #4 "); |
|
900 asm("and r2, r2, #0x1f "); |
|
901 asm("mov r3, #1 "); |
|
902 asm("mov r3, r3, lsl r2 "); /* r3 = bit to set */ |
|
903 asm("ldr r2, [r1] "); |
|
904 asm("orr r2, r2, r3 "); |
|
905 asm("str r2, [r1] "); /* set bit in iSpinLockOrderCheck corresponding to lock order */ |
|
906 |
|
907 asm("rwrlma_ok: "); |
|
908 asm("msr cpsr, r12 "); /* restore interrupts */ |
|
909 asm("ldmfd sp!, {r1-r4,r12} "); |
|
910 __JUMP(,lr); |
|
911 } |
|
912 |
|
913 extern "C" __NAKED__ void rwspin_runlock_entry_check() |
|
914 { |
|
915 /* R0 points to lock */ |
|
916 asm("stmfd sp!, {r1-r4,r12} "); |
|
917 asm("mrs r12, cpsr "); |
|
918 __ASM_CLI(); /* Disable interrupts */ |
|
919 GET_RWNO_TID(, r1); /* R1->SubScheduler */ |
|
920 asm("cmp r1, #0 "); |
|
921 asm("beq rwruec_ok "); /* Skip checks if subscheduler not yet initialised */ |
|
922 asm("ldr r2, [r0, #4] "); /* R2[24:31]=wcpu, R2[16:23]=order, R2[0:7]=rcpu mask */ |
|
923 asm("tst r2, #0x00E00000 "); |
|
924 asm("bne rwruec_preemption "); /* This lock requires preemption to be disabled */ |
|
925 |
|
926 /* check interrupts disabled */ |
|
927 asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask)); |
|
928 asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask)); /* Check interrupts masked */ |
|
929 asm("beq rwruec_1 "); /* Yes - OK */ |
|
930 __ASM_CRASH(); /* No - die */ |
|
931 |
|
932 asm("rwruec_preemption: "); |
|
933 asm("and r3, r2, #0x00FF0000 "); |
|
934 asm("cmp r3, #0x00FF0000 "); /* check for EOrderNone */ |
|
935 asm("ldrne r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount)); |
|
936 asm("beq rwruec_1 "); /* EOrderNone - don't check interrupts or preemption */ |
|
937 asm("cmp r3, #0 "); |
|
938 asm("bne rwruec_1 "); /* Preemption disabled - OK */ |
|
939 __ASM_CRASH(); /* Preemption enabled - die */ |
|
940 |
|
941 asm("rwruec_1: "); |
|
942 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask)); |
|
943 asm("tst r2, r3 "); /* Check if current CPU holds read lock */ |
|
944 asm("bne rwruec_2 "); /* Read lock held by this CPU - OK */ |
|
945 __ASM_CRASH(); /* Not held by this CPU - die */ |
|
946 |
|
947 asm("rwruec_2: "); |
|
948 asm("add r0, r0, #4 "); |
|
949 asm("1: "); |
|
950 LDREX(2,0); /* rcpu mask */ |
|
951 asm("bic r2, r2, r3 "); /* clear bit corresponding to current CPU */ |
|
952 STREX(4,2,0); |
|
953 asm("cmp r4, #0 "); |
|
954 asm("bne 1b "); |
|
955 asm("sub r0, r0, #4 "); |
|
956 asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck)); |
|
957 asm("tst r2, #0x00C00000 "); |
|
958 asm("bne rwruec_ok "); /* if EOrderNone, done */ |
|
959 asm("tst r2, #0x00200000 "); |
|
960 asm("addne r1, r1, #4 "); |
|
961 asm("mov r2, r2, lsr #16 "); |
|
962 asm("and r2, r2, #0x1F "); |
|
963 asm("mov r3, #1 "); |
|
964 asm("mov r3, r3, lsl r2 "); /* r3 = bit to clear */ |
|
965 asm("ldr r2, [r1] "); |
|
966 asm("tst r2, r3 "); /* test bit originally set */ |
|
967 asm("bic r2, r2, r3 "); |
|
968 asm("str r2, [r1] "); /* clear bit in iSpinLockOrderCheck corresponding to lock order */ |
|
969 asm("bne rwruec_ok "); /* if originally set, OK */ |
|
970 __ASM_CRASH(); /* if not, die - something must have got corrupted */ |
|
971 |
|
972 asm("rwruec_ok: "); |
|
973 asm("msr cpsr, r12 "); /* restore interrupts */ |
|
974 asm("ldmfd sp!, {r1-r4,r12} "); |
|
975 __JUMP(,lr); |
|
976 } |
|
977 |
|
978 |
|
979 extern "C" __NAKED__ void rwspin_wlock_entry_check() |
|
980 { |
|
981 /* R0 points to lock */ |
|
982 asm("stmfd sp!, {r1,r2,r3,r12} "); |
|
983 asm("mrs r12, cpsr "); |
|
984 __ASM_CLI(); /* Disable interrupts */ |
|
985 GET_RWNO_TID(, r1); /* R1->SubScheduler */ |
|
986 asm("cmp r1, #0 "); |
|
987 asm("beq rwwlec_ok "); /* Skip checks if subscheduler not yet initialised */ |
|
988 asm("ldr r2, [r0, #4] "); /* R2[24:31]=wcpu, R2[16:23]=order, R2[0:7]=rcpu mask */ |
|
989 asm("tst r2, #0x00E00000 "); |
|
990 asm("bne rwwlec_preemption "); /* This lock requires preemption to be disabled */ |
|
991 |
|
992 /* check interrupts disabled */ |
|
993 asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask)); |
|
994 asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask)); /* Check interrupts masked */ |
|
995 asm("beq rwwlec_1 "); /* Yes - OK */ |
|
996 __ASM_CRASH(); /* No - die */ |
|
997 |
|
998 asm("rwwlec_preemption: "); |
|
999 asm("and r3, r2, #0x00FF0000 "); |
|
1000 asm("cmp r3, #0x00FF0000 "); /* check for EOrderNone */ |
|
1001 asm("beq rwwlec_1 "); /* EOrderNone - don't check interrupts or preemption */ |
|
1002 asm("and r3, r12, #0x1F "); |
|
1003 asm("cmp r3, #0x13 "); /* Make sure we're in mode_svc */ |
|
1004 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount)); |
|
1005 asm("bne rwwlec_preemption_die "); /* If not, die */ |
|
1006 asm("cmp r3, #0 "); |
|
1007 asm("bne rwwlec_1 "); /* Preemption disabled - OK */ |
|
1008 asm("rwwlec_preemption_die: "); |
|
1009 __ASM_CRASH(); /* Preemption enabled - die */ |
|
1010 |
|
1011 asm("rwwlec_1: "); |
|
1012 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask)); |
|
1013 asm("tst r2, r3 "); /* Test if held by current CPU for read */ |
|
1014 asm("beq rwwlec_2 "); /* No - OK */ |
|
1015 __ASM_CRASH(); /* Yes - die */ |
|
1016 |
|
1017 asm("rwwlec_2: "); |
|
1018 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum)); |
|
1019 asm("cmp r3, r2, lsr #24 "); /* Test if held by current CPU for write */ |
|
1020 asm("bne rwwlec_3 "); /* No - OK */ |
|
1021 __ASM_CRASH(); /* Yes - die */ |
|
1022 |
|
1023 asm("rwwlec_3: "); |
|
1024 asm("ldr r3, [r1, #%a0]!" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck)); |
|
1025 asm("mov r2, r2, lsr #16 "); |
|
1026 asm("and r2, r2, #0xFF "); /* r2 = lock order */ |
|
1027 asm("ldr r1, [r1, #4] "); /* r3=low word of iSpinLockOrderCheck, r1=high word */ |
|
1028 asm("cmp r3, #0 "); |
|
1029 asm("addeq r2, r2, #0x20000000 "); /* if low word zero, add 32 to LS1 index ... */ |
|
1030 asm("moveq r3, r1 "); /* ... and r3=high word ... */ |
|
1031 asm("subs r1, r3, #1 "); /* R1 = R3 with all bits up to and including LS1 flipped */ |
|
1032 asm("beq rwwlec_ok "); /* If all bits zero, no locks held so OK */ |
|
1033 asm("eor r3, r3, r1 "); /* Clear all bits above LS1 */ |
|
1034 CLZ(1,3); /* R1 = 31 - bit number of LS1 */ |
|
1035 asm("rsb r1, r1, #31 "); /* R1 = bit number of LS1 */ |
|
1036 asm("add r1, r1, r2, lsr #24 "); /* add 32 if we were looking at high word */ |
|
1037 asm("mov r2, r2, lsl #24 "); /* this lock's order value into R2 high byte */ |
|
1038 asm("cmp r1, r2, asr #24 "); /* compare current lowest order lock to sign-extended order value */ |
|
1039 asm("bgt rwwlec_ok "); /* if this lock's order < current lowest, OK */ |
|
1040 __ASM_CRASH(); /* otherwise die */ |
|
1041 |
|
1042 asm("rwwlec_ok: "); |
|
1043 asm("msr cpsr, r12 "); /* restore interrupts */ |
|
1044 asm("ldmfd sp!, {r1,r2,r3,r12} "); |
|
1045 __JUMP(,lr); |
|
1046 } |
|
1047 |
|
1048 extern "C" __NAKED__ void rwspin_wlock_mark_acq() |
|
1049 { |
|
1050 /* R0 points to lock */ |
|
1051 asm("stmfd sp!, {r1,r2,r3,r12} "); |
|
1052 asm("mrs r12, cpsr "); |
|
1053 __ASM_CLI(); /* Disable interrupts */ |
|
1054 GET_RWNO_TID(, r1); /* R1->SubScheduler */ |
|
1055 asm("cmp r1, #0 "); |
|
1056 asm("beq rwwlma_ok "); /* Skip checks if subscheduler not yet initialised */ |
|
1057 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum)); |
|
1058 asm("ldrb r2, [r0, #6] "); /* R2 = lock order value */ |
|
1059 asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck)); |
|
1060 asm("strb r3, [r0, #7] "); /* set byte 7 to holding CPU number */ |
|
1061 asm("cmp r2, #0x40 "); |
|
1062 asm("bhs rwwlma_ok "); /* if EOrderNone, done */ |
|
1063 asm("cmp r2, #0x20 "); |
|
1064 asm("addhs r1, r1, #4 "); |
|
1065 asm("and r2, r2, #0x1f "); |
|
1066 asm("mov r3, #1 "); |
|
1067 asm("mov r3, r3, lsl r2 "); /* r3 = bit to set */ |
|
1068 asm("ldr r2, [r1] "); |
|
1069 asm("orr r2, r2, r3 "); |
|
1070 asm("str r2, [r1] "); /* set bit in iSpinLockOrderCheck corresponding to lock order */ |
|
1071 |
|
1072 asm("rwwlma_ok: "); |
|
1073 asm("msr cpsr, r12 "); /* restore interrupts */ |
|
1074 asm("ldmfd sp!, {r1,r2,r3,r12} "); |
|
1075 __JUMP(,lr); |
|
1076 } |
|
1077 |
|
1078 extern "C" __NAKED__ void rwspin_wunlock_entry_check() |
|
1079 { |
|
1080 /* R0 points to lock */ |
|
1081 asm("stmfd sp!, {r1,r2,r3,r12} "); |
|
1082 asm("mrs r12, cpsr "); |
|
1083 __ASM_CLI(); /* Disable interrupts */ |
|
1084 GET_RWNO_TID(, r1); /* R1->SubScheduler */ |
|
1085 asm("cmp r1, #0 "); |
|
1086 asm("beq rwwuec_ok "); /* Skip checks if subscheduler not yet initialised */ |
|
1087 asm("ldrh r2, [r0, #6] "); /* R2[8:15]=holding CPU, R2[0:7]=order */ |
|
1088 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum)); |
|
1089 asm("eor r2, r2, r3, lsl #8 "); /* R2[8:15]=holding CPU^current CPU, R2[0:7]=order */ |
|
1090 asm("tst r2, #0xE0 "); |
|
1091 asm("bne rwwuec_preemption "); /* This lock requires preemption to be disabled */ |
|
1092 |
|
1093 /* check interrupts disabled */ |
|
1094 asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask)); |
|
1095 asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask)); /* Check interrupts masked */ |
|
1096 asm("beq rwwuec_1 "); /* Yes - OK */ |
|
1097 __ASM_CRASH(); /* No - die */ |
|
1098 |
|
1099 asm("rwwuec_preemption: "); |
|
1100 asm("and r3, r2, #0xFF "); |
|
1101 asm("cmp r3, #0xFF "); /* check for EOrderNone */ |
|
1102 asm("ldrne r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount)); |
|
1103 asm("beq rwwuec_1 "); /* EOrderNone - don't check interrupts or preemption */ |
|
1104 asm("cmp r3, #0 "); |
|
1105 asm("bne rwwuec_1 "); /* Preemption disabled - OK */ |
|
1106 __ASM_CRASH(); /* Preemption enabled - die */ |
|
1107 |
|
1108 asm("rwwuec_1: "); |
|
1109 asm("tst r2, #0xFF00 "); /* Check if holding CPU ^ current CPU number == 0 */ |
|
1110 asm("beq rwwuec_2 "); /* Held by this CPU - OK */ |
|
1111 __ASM_CRASH(); /* Not held by this CPU - die */ |
|
1112 |
|
1113 asm("rwwuec_2: "); |
|
1114 asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck)); |
|
1115 asm("mov r3, #0xFF "); |
|
1116 asm("strb r3, [r0, #7] "); /* reset holding CPU */ |
|
1117 asm("cmp r2, #0x40 "); |
|
1118 asm("bhs rwwuec_ok "); /* if EOrderNone, done */ |
|
1119 asm("cmp r2, #0x20 "); |
|
1120 asm("addhs r1, r1, #4 "); |
|
1121 asm("and r2, r2, #0x1F "); |
|
1122 asm("mov r3, #1 "); |
|
1123 asm("mov r3, r3, lsl r2 "); /* r3 = bit to clear */ |
|
1124 asm("ldr r2, [r1] "); |
|
1125 asm("tst r2, r3 "); /* test bit originally set */ |
|
1126 asm("bic r2, r2, r3 "); |
|
1127 asm("str r2, [r1] "); /* clear bit in iSpinLockOrderCheck corresponding to lock order */ |
|
1128 asm("bne rwwuec_ok "); /* if originally set, OK */ |
|
1129 __ASM_CRASH(); /* if not, die - something must have got corrupted */ |
|
1130 |
|
1131 asm("rwwuec_ok: "); |
|
1132 asm("msr cpsr, r12 "); /* restore interrupts */ |
|
1133 asm("ldmfd sp!, {r1,r2,r3,r12} "); |
|
1134 __JUMP(,lr); |
|
1135 } |
|
1136 #endif |
|
1137 |
|
1138 |
|
1139 /*----------------------------------------------------------------------------- |
|
1140 - Read locks disabling IRQ |
|
1141 -----------------------------------------------------------------------------*/ |
|
1142 __NAKED__ EXPORT_C void TRWSpinLock::LockIrqR() |
|
1143 { |
|
1144 __ASM_CLI(); /* Disable interrupts */ |
|
1145 RWSPIN_RLOCK_ENTRY_CHECK() |
|
1146 asm("1: "); |
|
1147 LDREX(1,0); |
|
1148 asm("and r2, r1, #0xFF "); /* R2 = original in.w */ |
|
1149 asm("add r1, r1, #0x100 "); /* increment in.r */ |
|
1150 asm("tst r1, #0xFF00 "); /* if wraparound ... */ |
|
1151 asm("subeq r1, r1, #0x10000 "); /* ... revert carry into out.w */ |
|
1152 STREX(3,1,0); |
|
1153 asm("cmp r3, #0 "); |
|
1154 asm("bne 1b "); |
|
1155 asm("3: "); |
|
1156 asm("and r1, r1, #0xFF0000 "); /* R1 = out.w << 16 */ |
|
1157 asm("cmp r1, r2, lsl #16 "); /* out.w = original in.w ? */ |
|
1158 asm("bne 2f "); /* no - must wait */ |
|
1159 RWSPIN_RLOCK_MARK_ACQ() |
|
1160 __DATA_MEMORY_BARRIER__(r3); /* we have got the lock */ |
|
1161 __JUMP(,lr); |
|
1162 |
|
1163 asm("2: "); |
|
1164 ARM_WFE; |
|
1165 asm("ldr r1, [r0, #0] "); /* read out.w count again */ |
|
1166 asm("b 3b "); |
|
1167 } |
|
1168 |
|
1169 __NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqR() |
|
1170 { |
|
1171 RWSPIN_RUNLOCK_ENTRY_CHECK() |
|
1172 __DATA_MEMORY_BARRIER_Z__(r3); /* Ensure accesses don't move outside locked section */ |
|
1173 asm("1: "); |
|
1174 LDREX(2,0); |
|
1175 asm("add r2, r2, #0x01000000 "); /* increment out.r */ |
|
1176 STREX(3,2,0); |
|
1177 asm("cmp r3, #0 "); |
|
1178 asm("bne 1b "); |
|
1179 __DATA_SYNC_BARRIER__(r3); /* Ensure write to out.r completes before SEV */ |
|
1180 ARM_SEV; /* Wake up any waiting processors */ |
|
1181 __ASM_STI(); /* Enable interrupts */ |
|
1182 __JUMP(,lr); |
|
1183 } |
|
1184 |
|
1185 __NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqR() |
|
1186 { |
|
1187 GET_RWNO_TID(,r12); /* r12 -> TSubScheduler */ |
|
1188 asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr)); |
|
1189 asm("ldr r2, [r0, #0] "); |
|
1190 asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending)); |
|
1191 asm("eor r2, r2, r2, lsr #16 "); /* r2 low byte = out.w ^ in.w = 0 if no writers waiting */ |
|
1192 asm("tst r2, #0xFF "); |
|
1193 asm("addeq r3, r3, #1 "); |
|
1194 asm("cmpeq r3, #1024 "); /* if no writers waiting for lock, check for pending interrupt */ |
|
1195 asm("bne 1f "); /* branch if writers waiting or pending interrupt */ |
|
1196 asm("mov r0, #0 "); /* else return FALSE */ |
|
1197 __JUMP(,lr); |
|
1198 |
|
1199 asm("1: "); |
|
1200 asm("str lr, [sp, #-4]! "); |
|
1201 asm("bl " CSM_ZN11TRWSpinLock10UnlockIrqREv); |
|
1202 asm("bl " CSM_ZN11TRWSpinLock8LockIrqREv); |
|
1203 asm("mov r0, #1 "); |
|
1204 asm("ldr pc, [sp], #4 "); |
|
1205 } |
|
1206 |
|
1207 |
|
1208 /*----------------------------------------------------------------------------- |
|
1209 - Write locks disabling IRQ |
|
1210 -----------------------------------------------------------------------------*/ |
|
1211 __NAKED__ EXPORT_C void TRWSpinLock::LockIrqW() |
|
1212 { |
|
1213 __ASM_CLI(); /* Disable interrupts */ |
|
1214 RWSPIN_WLOCK_ENTRY_CHECK() |
|
1215 asm("1: "); |
|
1216 LDREX(1,0); |
|
1217 asm("mov r2, r1, lsl #16 "); /* R2 = original in << 16 */ |
|
1218 asm("add r1, r1, #1 "); /* increment in.w */ |
|
1219 asm("tst r1, #0xFF "); /* if wraparound ... */ |
|
1220 asm("subeq r1, r1, #0x100 "); /* ... revert carry into in.r */ |
|
1221 STREX(3,1,0); |
|
1222 asm("cmp r3, #0 "); |
|
1223 asm("bne 1b "); |
|
1224 asm("3: "); |
|
1225 asm("mov r1, r1, lsr #16 "); /* r1 = out */ |
|
1226 asm("cmp r1, r2, lsr #16 "); /* out = original in ? */ |
|
1227 asm("bne 2f "); /* no - must wait */ |
|
1228 RWSPIN_WLOCK_MARK_ACQ() |
|
1229 __DATA_MEMORY_BARRIER__(r3); /* we have got the lock */ |
|
1230 __JUMP(,lr); |
|
1231 |
|
1232 asm("2: "); |
|
1233 ARM_WFE; |
|
1234 asm("ldr r1, [r0, #0] "); /* read out count again */ |
|
1235 asm("b 3b "); |
|
1236 } |
|
1237 |
|
1238 __NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqW() |
|
1239 { |
|
1240 RWSPIN_WUNLOCK_ENTRY_CHECK() |
|
1241 __DATA_MEMORY_BARRIER_Z__(r3); /* Ensure accesses don't move outside locked section */ |
|
1242 asm("ldrb r2, [r0, #2] "); |
|
1243 asm("add r2, r2, #1 "); |
|
1244 asm("strb r2, [r0, #2] "); /* increment out.w */ |
|
1245 __DATA_SYNC_BARRIER__(r3); /* Ensure write to out.w completes before SEV */ |
|
1246 ARM_SEV; /* Wake up any waiting processors */ |
|
1247 __ASM_STI(); /* Enable interrupts */ |
|
1248 __JUMP(,lr); |
|
1249 } |
|
1250 |
|
1251 __NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqW() |
|
1252 { |
|
1253 GET_RWNO_TID(,r12); /* r12 -> TSubScheduler */ |
|
1254 asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr)); |
|
1255 asm("ldr r2, [r0, #0] "); |
|
1256 asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending)); |
|
1257 asm("add r2, r2, #0x00010000 "); /* increment out.w */ |
|
1258 asm("tst r2, #0x00FF0000 "); /* if wraparound, revert carry */ |
|
1259 asm("subeq r2, r2, #0x01000000 "); |
|
1260 asm("eor r2, r2, r2, lsl #16 "); /* test if (out.w+1,out.r) == (in.w,in.r) */ |
|
1261 asm("cmp r2, #0x00010000 "); |
|
1262 asm("bhs 1f "); /* if not, someone else is waiting */ |
|
1263 asm("add r3, r3, #1 "); |
|
1264 asm("cmp r3, #1024 "); /* if no-one waiting for lock, check for pending interrupt */ |
|
1265 asm("bne 1f "); /* branch if pending interrupt */ |
|
1266 asm("mov r0, #0 "); /* else return FALSE */ |
|
1267 __JUMP(,lr); |
|
1268 |
|
1269 asm("1: "); |
|
1270 asm("str lr, [sp, #-4]! "); |
|
1271 asm("bl " CSM_ZN11TRWSpinLock10UnlockIrqWEv); |
|
1272 asm("bl " CSM_ZN11TRWSpinLock8LockIrqWEv); |
|
1273 asm("mov r0, #1 "); |
|
1274 asm("ldr pc, [sp], #4 "); |
|
1275 } |
|
1276 |
|
1277 |
|
1278 |
|
1279 /*----------------------------------------------------------------------------- |
|
1280 - Read locks leaving IRQ alone |
|
1281 -----------------------------------------------------------------------------*/ |
|
1282 __NAKED__ EXPORT_C void TRWSpinLock::LockOnlyR() |
|
1283 { |
|
1284 RWSPIN_RLOCK_ENTRY_CHECK() |
|
1285 asm("1: "); |
|
1286 LDREX(1,0); |
|
1287 asm("and r2, r1, #0xFF "); /* R2 = original in.w */ |
|
1288 asm("add r1, r1, #0x100 "); /* increment in.r */ |
|
1289 asm("tst r1, #0xFF00 "); /* if wraparound ... */ |
|
1290 asm("subeq r1, r1, #0x10000 "); /* ... revert carry into out.w */ |
|
1291 STREX(3,1,0); |
|
1292 asm("cmp r3, #0 "); |
|
1293 asm("bne 1b "); |
|
1294 asm("3: "); |
|
1295 asm("and r1, r1, #0xFF0000 "); /* R1 = out.w << 16 */ |
|
1296 asm("cmp r1, r2, lsl #16 "); /* out.w = original in.w ? */ |
|
1297 asm("bne 2f "); /* no - must wait */ |
|
1298 RWSPIN_RLOCK_MARK_ACQ() |
|
1299 __DATA_MEMORY_BARRIER__(r3); /* we have got the lock */ |
|
1300 __JUMP(,lr); |
|
1301 |
|
1302 asm("2: "); |
|
1303 ARM_WFE; |
|
1304 asm("ldr r1, [r0, #0] "); /* read out.w count again */ |
|
1305 asm("b 3b "); |
|
1306 } |
|
1307 |
|
1308 __NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyR() |
|
1309 { |
|
1310 RWSPIN_RUNLOCK_ENTRY_CHECK() |
|
1311 __DATA_MEMORY_BARRIER_Z__(r3); /* Ensure accesses don't move outside locked section */ |
|
1312 asm("1: "); |
|
1313 LDREX(2,0); |
|
1314 asm("add r2, r2, #0x01000000 "); /* increment out.r */ |
|
1315 STREX(3,2,0); |
|
1316 asm("cmp r3, #0 "); |
|
1317 asm("bne 1b "); |
|
1318 __DATA_SYNC_BARRIER__(r3); /* Ensure write to out.r completes before SEV */ |
|
1319 ARM_SEV; /* Wake up any waiting processors */ |
|
1320 __JUMP(,lr); |
|
1321 } |
|
1322 |
|
1323 __NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyR() |
|
1324 { |
|
1325 asm("ldr r2, [r0, #0] "); |
|
1326 asm("eor r2, r2, r2, lsr #16 "); /* r2 low byte = out.w ^ in.w = 0 if no writers waiting */ |
|
1327 asm("tst r2, #0xFF "); |
|
1328 asm("bne 1f "); /* branch if writers waiting */ |
|
1329 asm("mov r0, #0 "); /* else return FALSE */ |
|
1330 __JUMP(,lr); |
|
1331 |
|
1332 asm("1: "); |
|
1333 asm("str lr, [sp, #-4]! "); |
|
1334 asm("bl " CSM_ZN11TRWSpinLock11UnlockOnlyREv); |
|
1335 asm("bl " CSM_ZN11TRWSpinLock9LockOnlyREv); |
|
1336 asm("mov r0, #1 "); |
|
1337 asm("ldr pc, [sp], #4 "); |
|
1338 } |
|
1339 |
|
1340 |
|
1341 /*----------------------------------------------------------------------------- |
|
1342 - Write locks leaving IRQ alone |
|
1343 -----------------------------------------------------------------------------*/ |
|
1344 __NAKED__ EXPORT_C void TRWSpinLock::LockOnlyW() |
|
1345 { |
|
1346 RWSPIN_WLOCK_ENTRY_CHECK() |
|
1347 asm("1: "); |
|
1348 LDREX(1,0); |
|
1349 asm("mov r2, r1, lsl #16 "); /* R2 = original in << 16 */ |
|
1350 asm("add r1, r1, #1 "); /* increment in.w */ |
|
1351 asm("tst r1, #0xFF "); /* if wraparound ... */ |
|
1352 asm("subeq r1, r1, #0x100 "); /* ... revert carry into in.r */ |
|
1353 STREX(3,1,0); |
|
1354 asm("cmp r3, #0 "); |
|
1355 asm("bne 1b "); |
|
1356 asm("3: "); |
|
1357 asm("mov r1, r1, lsr #16 "); /* r1 = out */ |
|
1358 asm("cmp r1, r2, lsr #16 "); /* out = original in ? */ |
|
1359 asm("bne 2f "); /* no - must wait */ |
|
1360 RWSPIN_WLOCK_MARK_ACQ() |
|
1361 __DATA_MEMORY_BARRIER__(r3); /* we have got the lock */ |
|
1362 __JUMP(,lr); |
|
1363 |
|
1364 asm("2: "); |
|
1365 ARM_WFE; |
|
1366 asm("ldr r1, [r0, #0] "); /* read out count again */ |
|
1367 asm("b 3b "); |
|
1368 } |
|
1369 |
|
1370 __NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyW() |
|
1371 { |
|
1372 RWSPIN_WUNLOCK_ENTRY_CHECK() |
|
1373 __DATA_MEMORY_BARRIER_Z__(r3); /* Ensure accesses don't move outside locked section */ |
|
1374 asm("ldrb r2, [r0, #2] "); |
|
1375 asm("add r2, r2, #1 "); |
|
1376 asm("strb r2, [r0, #2] "); /* increment out.w */ |
|
1377 __DATA_SYNC_BARRIER__(r3); /* Ensure write to out.w completes before SEV */ |
|
1378 ARM_SEV; /* Wake up any waiting processors */ |
|
1379 __JUMP(,lr); |
|
1380 } |
|
1381 |
|
1382 __NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyW() |
|
1383 { |
|
1384 asm("ldr r2, [r0, #0] "); |
|
1385 asm("add r2, r2, #0x00010000 "); /* increment out.w */ |
|
1386 asm("tst r2, #0x00FF0000 "); /* if wraparound, revert carry */ |
|
1387 asm("subeq r2, r2, #0x01000000 "); |
|
1388 asm("eor r2, r2, r2, lsl #16 "); /* test if (out.w+1,out.r) == (in.w,in.r) */ |
|
1389 asm("cmp r2, #0x00010000 "); |
|
1390 asm("bhs 1f "); /* if not, someone else is waiting */ |
|
1391 asm("mov r0, #0 "); /* else return FALSE */ |
|
1392 __JUMP(,lr); |
|
1393 |
|
1394 asm("1: "); |
|
1395 asm("str lr, [sp, #-4]! "); |
|
1396 asm("bl " CSM_ZN11TRWSpinLock11UnlockOnlyWEv); |
|
1397 asm("bl " CSM_ZN11TRWSpinLock9LockOnlyWEv); |
|
1398 asm("mov r0, #1 "); |
|
1399 asm("ldr pc, [sp], #4 "); |
|
1400 } |
|

/*-----------------------------------------------------------------------------
 - Read locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveR()
	{
	asm("mrs r12, cpsr ");
	__ASM_CLI();				/* Disable interrupts */
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("and r2, r1, #0xFF ");		/* R2 = original in.w */
	asm("add r1, r1, #0x100 ");		/* increment in.r */
	asm("tst r1, #0xFF00 ");		/* if wraparound ... */
	asm("subeq r1, r1, #0x10000 ");		/* ... revert carry into out.w */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("and r1, r1, #0xFF0000 ");		/* R1 = out.w << 16 */
	asm("cmp r1, r2, lsl #16 ");		/* out.w = original in.w ? */
	asm("bne 2f ");				/* no - must wait */
	RWSPIN_RLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);		/* we have got the lock */
	asm("and r0, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");		/* read out.w count again */
	asm("b 3b ");
	}
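
/* A reader's ticket is the in.w value it observes while atomically
   incrementing in.r; it then waits only for out.w to reach that value, i.e.
   for every earlier writer to have released - readers never wait for other
   readers. The caller's original CPSR I and F bits are returned so the
   matching UnlockIrqRestoreR() can restore them. Illustrative sketch only,
   using the same placeholder helpers as the LockOnlyW() sketch above:

	   TInt irq = cpsr & KAllInterruptsMask;	// capture, then __ASM_CLI()
	   TUint8 ticket;
	   TUint32 v, nv;
	   do	{
		   v = load_exclusive(&iLock);
		   ticket = (TUint8)v;				// in.w at the time we joined
		   nv = v + 0x100;				// increment in.r ...
		   if (!(nv & 0xFF00))
			   nv -= 0x10000;			// ... undoing any carry into out.w
		   } while (store_exclusive(&iLock, nv));
	   while (((iLock >> 16) & 0xFF) != ticket)		// wait for out.w only
		   wait_for_event();
	   return irq;
*/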
|
__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreR(TInt)
	{
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);		/* Ensure accesses don't move outside locked section */
	asm("1: ");
	LDREX(2,0);
	asm("add r2, r2, #0x01000000 ");	/* increment out.r */
	STREX(3,2,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("mrs r12, cpsr ");
	__DATA_SYNC_BARRIER__(r3);		/* Ensure write to out.r completes before SEV */
	asm("bic r12, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	ARM_SEV;				/* Wake up any waiting processors */
	asm("orr r1, r1, r12 ");
	asm("msr cpsr, r1 ");			/* restore interrupts */
	__JUMP(,lr);
	}
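
/* Unlike the write unlock, the read unlock must use LDREX/STREX: several
   readers can release at the same time and each must add 1 to out.r (the
   0x01000000 increment) without losing a concurrent update. Interrupts are
   then restored by merging the saved I/F bits passed in as the argument (r1)
   with the current CPSR, after the DSB/SEV that wakes any waiting writer. */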
|
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreR(TInt)
	{
	GET_RWNO_TID(,r12);			/* r12 -> TSubScheduler */
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
	asm("ldr r2, [r0, #0] ");
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
	asm("eor r2, r2, r2, lsr #16 ");	/* r2 low byte = out.w ^ in.w = 0 if no writers waiting */
	asm("tst r2, #0xFF ");
	asm("addeq r3, r3, #1 ");
	asm("cmpeq r3, #1024 ");		/* if no writers waiting for lock, check for pending interrupt */
	asm("bne 1f ");				/* branch if writers waiting or pending interrupt */
	asm("mov r0, #0 ");			/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock17UnlockIrqRestoreREi);
	asm("bl " CSM_ZN11TRWSpinLock8LockIrqREv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}
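
/* The IRQ-restoring flash variants also peek at the GIC CPU interface:
   iHighestPending holds the ID of the highest-priority pending interrupt,
   and 1023 (the "spurious" ID) means nothing is pending - hence the add #1 /
   compare-with-1024 trick, which folds "no writer queued AND no interrupt
   pending" into one conditional chain. If either check fails the lock is
   dropped via UnlockIrqRestoreR() (restoring the caller's interrupt state so
   a pending interrupt can be taken) and then re-acquired with LockIrqR(). */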
|

/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveW()
	{
	asm("mrs r12, cpsr ");
	__ASM_CLI();				/* Disable interrupts */
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("mov r2, r1, lsl #16 ");		/* R2 = original in << 16 */
	asm("add r1, r1, #1 ");			/* increment in.w */
	asm("tst r1, #0xFF ");			/* if wraparound ... */
	asm("subeq r1, r1, #0x100 ");		/* ... revert carry into in.r */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("mov r1, r1, lsr #16 ");		/* r1 = out */
	asm("cmp r1, r2, lsr #16 ");		/* out = original in ? */
	asm("bne 2f ");				/* no - must wait */
	RWSPIN_WLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);		/* we have got the lock */
	asm("and r0, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");		/* read out count again */
	asm("b 3b ");
	}
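
/* LockIrqSaveW() is LockOnlyW() bracketed by interrupt masking: the caller's
   CPSR I and F bits are captured first, interrupts stay disabled for the
   duration of the spin, and the saved bits are returned so the matching
   UnlockIrqRestoreW() can put the interrupt state back. The ticket logic is
   identical to LockOnlyW() above. */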
|
__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreW(TInt)
	{
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);		/* Ensure accesses don't move outside locked section */
	asm("ldrb r2, [r0, #2] ");
	asm("mrs r12, cpsr ");
	asm("add r2, r2, #1 ");
	asm("bic r12, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("strb r2, [r0, #2] ");		/* increment out.w */
	__DATA_SYNC_BARRIER__(r3);		/* Ensure write to out.w completes before SEV */
	ARM_SEV;				/* Wake up any waiting processors */
	asm("orr r1, r1, r12 ");
	asm("msr cpsr, r1 ");			/* restore interrupts */
	__JUMP(,lr);
	}
|
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreW(TInt)
	{
	GET_RWNO_TID(,r12);			/* r12 -> TSubScheduler */
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
	asm("ldr r2, [r0, #0] ");
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
	asm("add r2, r2, #0x00010000 ");	/* increment out.w */
	asm("tst r2, #0x00FF0000 ");		/* if wraparound, revert carry */
	asm("subeq r2, r2, #0x01000000 ");
	asm("eor r2, r2, r2, lsl #16 ");	/* test if (out.w+1,out.r) == (in.w,in.r) */
	asm("cmp r2, #0x00010000 ");
	asm("bhs 1f ");				/* if not equal, someone else is waiting */
	asm("add r3, r3, #1 ");
	asm("cmp r3, #1024 ");			/* if no-one else waiting for lock, check for pending interrupt */
	asm("bne 1f ");				/* branch if pending interrupt */
	asm("mov r0, #0 ");			/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock17UnlockIrqRestoreWEi);
	asm("bl " CSM_ZN11TRWSpinLock8LockIrqWEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}
|

/*-----------------------------------------------------------------------------
 - Read lock flash allowing preemption
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptR()
	{
	asm("ldr r2, [r0, #0] ");
	GET_RWNO_TID(,r12);			/* r12 -> TSubScheduler */
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("eor r2, r2, r2, lsr #16 ");	/* r2 low byte = out.w ^ in.w = 0 if no writers waiting */
	asm("tst r2, #0xFF ");
	asm("cmpeq r3, #0 ");			/* if no writers waiting, check if reschedule or IDFCs pending */
	asm("bne 1f ");				/* branch if so or if writers waiting */
	asm("mov r0, #0 ");			/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("stmfd sp!, {r0,lr} ");
	asm("bl " CSM_ZN11TRWSpinLock11UnlockOnlyREv);
	asm("bl " CSM_ZN5NKern15PreemptionPointEv);
	asm("ldr r0, [sp], #4 ");
	asm("bl " CSM_ZN11TRWSpinLock9LockOnlyREv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}
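
/* FlashPreempt* extends the flash test with scheduling state: the lock is
   also flashed when this CPU's TSubScheduler has iRescheduleNeededFlag set
   (a reschedule or IDFCs are pending), in which case the lock is dropped,
   NKern::PreemptionPoint() is called to let the reschedule happen, and the
   lock is re-acquired. Roughly, as an illustrative sketch (SubSched() stands
   for the per-CPU TSubScheduler fetched via GET_RWNO_TID above):

	   if (in.w == out.w && !SubSched().iRescheduleNeededFlag)
		   return FALSE;			// no writer queued, nothing pending
	   UnlockOnlyR();
	   NKern::PreemptionPoint();
	   LockOnlyR();
	   return TRUE;
*/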
|

/*-----------------------------------------------------------------------------
 - Write lock flash allowing preemption
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptW()
	{
	asm("ldr r2, [r0, #0] ");
	GET_RWNO_TID(,r12);			/* r12 -> TSubScheduler */
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("add r2, r2, #0x00010000 ");	/* increment out.w */
	asm("tst r2, #0x00FF0000 ");		/* if wraparound, revert carry */
	asm("subeq r2, r2, #0x01000000 ");
	asm("eor r2, r2, r2, lsl #16 ");	/* test if (out.w+1,out.r) == (in.w,in.r) */
	asm("cmp r2, #0x00010000 ");
	asm("bhs 1f ");				/* if not equal, someone else is waiting */
	asm("cmp r3, #0 ");			/* no-one else waiting, check if reschedule or IDFCs pending */
	asm("bne 1f ");				/* if so, branch to release lock */
	asm("mov r0, #0 ");			/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("stmfd sp!, {r0,lr} ");
	asm("bl " CSM_ZN11TRWSpinLock11UnlockOnlyWEv);
	asm("bl " CSM_ZN5NKern15PreemptionPointEv);
	asm("ldr r0, [sp], #4 ");
	asm("bl " CSM_ZN11TRWSpinLock9LockOnlyWEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}
