// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\arm\atomic_64_exec.h
// User-side 64 bit atomic operations on V6 or V5 processors using Exec calls
// WARNING: GCC98r2 doesn't align registers so 'v' ends up in R2:R1 not R3:R2
//
//

#include "atomic_ops.h"

#ifdef __CPU_ARMV6
// Write paging supported, so atomics must work on paged memory, so SLOW exec needed
#define __ATOMIC64_EXEC__(op) SLOW_EXEC1_NR(EExecSlowAtomic##op##64)
#else
// Write paging not supported, so atomics can assume unpaged memory, so FAST exec OK
#define __ATOMIC64_EXEC__(op) FAST_EXEC1_NR(EFastExecFastAtomic##op##64)
#endif

#ifdef __BARRIERS_NEEDED__
#error Barriers not supported on V6/V5, only V6K/V7
#endif
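
// The implementations below all follow the same pattern: build a small argument block on
// the caller's stack (iA = target address, followed by i1/i2/i3 or iQ as the operation
// requires), point R0 at it and invoke __ATOMIC64_EXEC__, which traps to the kernel to
// perform the 64-bit operation atomically. Because __BARRIERS_NEEDED__ is never defined
// for these CPUs, the relaxed/acquire/release/ordered variants are all equivalent, so the
// rel/rlx/ord entry points are empty __NAKED__ functions that simply fall through into the
// acq implementation that follows them.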

#if defined(__OP_LOAD__)
#error LOAD same as kernel side
#elif defined(__OP_STORE__)
#error STORE same as kernel side

#elif defined(__OP_RMW1__)

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
{
// R0=a, R3:R2=v
// return value in R1:R0
// just fall through to __e32_atomic_*_acq64
}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
{
// R0=a, R3:R2=v
// return value in R1:R0
// just fall through to __e32_atomic_*_acq64
}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
{
// R0=a, R3:R2=v
// return value in R1:R0
// just fall through to __e32_atomic_*_acq64
}

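// The acq variant below does the real work. Apart from ADD, which has its own Exec call,
// the RMW1 operations are all encoded as the kernel's generic AND-XOR (Axo) operation,
// which atomically performs: oldv = *iA; *iA = (oldv & i1) ^ i2; return oldv. The masks
// pushed below are therefore:
//   SWP: i1 = 0,  i2 = v   -> *iA becomes v
//   AND: i1 = v,  i2 = 0   -> *iA becomes oldv & v
//   IOR: i1 = ~v, i2 = v   -> *iA becomes oldv | v
//   XOR: i1 = ~0, i2 = v   -> *iA becomes oldv ^ v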
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
{
// R0=a, R3:R2=v
// return value in R1:R0

#ifndef __EABI__
asm("mov r3, r2 ");     // GCC98r2 passes v in R2:R1 (see header warning), so move it up to R3:R2
asm("mov r2, r1 ");
#endif
ENSURE_8BYTE_ALIGNMENT(0);
#if defined(__OP_SWP__)
asm("mov r1, #0 ");
asm("mov r12, #0 ");
asm("stmfd sp!, {r2-r3} ");     // i2 = XOR mask = v
asm("stmfd sp!, {r1,r12} ");    // i1 = AND mask = 0
asm("stmfd sp!, {r0-r1} ");     // iA = a
asm("mov r0, sp ");
__ATOMIC64_EXEC__(Axo);
asm("ldmia sp!, {r0-r1} ");
asm("add sp, sp, #16 ");
#elif defined(__OP_ADD__)
asm("stmfd sp!, {r0-r3} ");     // iA = a (r1 pushed as padding), i1 = v
asm("mov r0, sp ");
__ATOMIC64_EXEC__(Add);
asm("ldmia sp!, {r0-r1} ");
asm("add sp, sp, #8 ");
#elif defined(__OP_AND__)
asm("mov r1, #0 ");
asm("mov r12, #0 ");
asm("stmfd sp!, {r1,r12} ");    // i2 = XOR mask = 0
asm("stmfd sp!, {r0-r3} ");     // i1 = AND mask = v, iA=a
asm("mov r0, sp ");
__ATOMIC64_EXEC__(Axo);
asm("ldmia sp!, {r0-r1} ");
asm("add sp, sp, #16 ");
#elif defined(__OP_IOR__)
asm("mvn r1, r2 ");             // r12:r1 = ~r3:r2
asm("mvn r12, r3 ");
asm("stmfd sp!, {r2-r3} ");     // i2 = XOR mask = v
asm("stmfd sp!, {r1,r12} ");    // i1 = AND mask = ~v
asm("stmfd sp!, {r0-r1} ");     // iA = a
asm("mov r0, sp ");
__ATOMIC64_EXEC__(Axo);
asm("ldmia sp!, {r0-r1} ");
asm("add sp, sp, #16 ");
#elif defined(__OP_XOR__)
asm("mvn r1, #0 ");
asm("mvn r12, #0 ");
asm("stmfd sp!, {r2-r3} ");     // i2 = XOR mask = v
asm("stmfd sp!, {r1,r12} ");    // i1 = AND mask = 0xFFFFFFFFFFFFFFFF
asm("stmfd sp!, {r0-r1} ");     // iA = a
asm("mov r0, sp ");
__ATOMIC64_EXEC__(Axo);
asm("ldmia sp!, {r0-r1} ");
asm("add sp, sp, #16 ");
#endif
__JUMP(,lr);
}
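
// Illustrative use, taking __OPERATION__ == ior as an example (variable names are
// examples only); as with the rest of the __e32_atomic_* RMW API, the return value is
// the 64-bit value held at the target before the operation:
//   TUint64 oldFlags = __e32_atomic_ior_ord64(&iFlags, KFlagsToSet);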



#elif defined(__OP_CAS__)

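// The CAS entry points follow the __e32_atomic_cas semantics: atomically,
//   if (*a == *q) { *a = v; return TRUE; } else { *q = *a; return FALSE; }
// The compare-and-store itself is done by the kernel-side Cas handler, which leaves the
// TBool result in R0.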
extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
{
// R0=a, R1=q, R3:R2=v
// return value in R0
// just fall through to __e32_atomic_*_acq64
}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
{
// R0=a, R1=q, R3:R2=v
// return value in R0
// just fall through to __e32_atomic_*_acq64
}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
{
// R0=a, R1=q, R3:R2=v
// return value in R0
// just fall through to __e32_atomic_*_acq64
}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
{
// R0=a, R1=q, R3:R2=v
// return value in R0
ENSURE_8BYTE_ALIGNMENT(0);
asm("stmfd sp!, {r0-r3} ");     // iA=a, iQ=q, i1=v
asm("mov r0, sp ");
__ATOMIC64_EXEC__(Cas);         // returns result in R0
asm("add sp, sp, #16 ");
__JUMP(,lr);
}
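
// Illustrative caller pattern (names are examples only): retry a CAS until the update wins.
// On failure the current value of the target is copied back into oldv, so the loop re-reads
// it for free:
//   TUint64 oldv = __e32_atomic_load_acq64(&iValue);
//   TUint64 newv;
//   do { newv = Transform(oldv); } while (!__e32_atomic_cas_ord64(&iValue, &oldv, newv));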


#elif defined(__OP_AXO__)

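// The AXO (AND-XOR) entry points atomically perform: oldv = *a; *a = (oldv & u) ^ v; return oldv.
// u and v are passed through unchanged as the i1 (AND) and i2 (XOR) masks of the kernel-side
// Axo handler. Note that GCC98r2 (non-EABI) does not register-pair-align the 64-bit arguments
// (see the header warning), hence the separate argument-marshalling path in the acq variant below.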
#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
#endif
{
// R0=a, R3:R2=u, [SP+4,0]=v
// return value in R1:R0
// just fall through to __e32_atomic_*_acq64
}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
#endif
{
// R0=a, R3:R2=u, [SP+4,0]=v
// return value in R1:R0
// just fall through to __e32_atomic_*_acq64
}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
#endif
{
// R0=a, R3:R2=u, [SP+4,0]=v
// return value in R1:R0
// just fall through to __e32_atomic_*_acq64
}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
#endif
{
// R0=a, R3:R2=u, [SP+4,0]=v
// return value in R1:R0
ENSURE_8BYTE_ALIGNMENT(0);
#ifdef __EABI__
// i2 = XOR mask = v already on stack
asm("stmfd sp!, {r0-r3} ");     // i1 = AND mask = u, iA = a
#else
asm("stmfd sp!, {r1-r3} ");     // i1 = AND mask = u, i2 = XOR mask = v (high word already on stack)
asm("stmfd sp!, {r0-r1} ");     // iA = a, dummy word for i0 (unused)
#endif
asm("mov r0, sp ");
__ATOMIC64_EXEC__(Axo);
asm("ldmia sp!, {r0-r1} ");
#ifdef __EABI__
asm("add sp, sp, #8 ");
#else
asm("add sp, sp, #12 ");
#endif
__JUMP(,lr);
}
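
// Illustrative use (names are examples only): atomically replace the bit-field selected by
// KFieldMask with KNewBits (where KNewBits lies within KFieldMask), returning the old value:
//   TUint64 oldv = __e32_atomic_axo_ord64(&iState, ~KFieldMask, KNewBits);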


#elif defined(__OP_RMW3__)

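// The RMW3 (threshold-and-add) entry points atomically perform:
//   oldv = *a; if (oldv >= t) *a = oldv + u; else *a = oldv + v; return oldv;
// where Tau compares as unsigned 64-bit and Tas as signed 64-bit. The arguments are
// marshalled into the usual stack block (iA = a, i1 = t, i2 = u, i3 = v) before the Exec call.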
#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
#endif
{
// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
// return value in R1:R0
// just fall through to __e32_atomic_*_acq64
}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
#endif
{
// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
// return value in R1:R0
// just fall through to __e32_atomic_*_acq64
}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
#endif
{
// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
// return value in R1:R0
// just fall through to __e32_atomic_*_acq64
}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
#endif
{
// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
// return value in R1:R0
ENSURE_8BYTE_ALIGNMENT(0);
#ifdef __EABI__
// i3 = v already on stack
// i2 = u already on stack
asm("stmfd sp!, {r0-r3} ");     // i1 = t, iA = a
#else
// v and high word of u already on stack
asm("stmfd sp!, {r1-r3} ");     // i1 = t, i2 = u (high word already on stack)
asm("stmfd sp!, {r0-r1} ");     // iA = a, dummy word for i0 (unused)
#endif
asm("mov r0, sp ");
#if defined(__OP_TAU__)
__ATOMIC64_EXEC__(Tau);
#elif defined(__OP_TAS__)
__ATOMIC64_EXEC__(Tas);
#endif
asm("ldmia sp!, {r0-r1} ");
#ifdef __EABI__
asm("add sp, sp, #8 ");
#else
asm("add sp, sp, #12 ");
#endif
__JUMP(,lr);
}
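
// Illustrative use of the unsigned variant (names are examples only): a bounded counter
// that is incremented only while it is still below KLimit:
//   TUint64 oldv = __e32_atomic_tau_ord64(&iCount, KLimit, 0, 1);
//   // oldv < KLimit means the increment was applied; otherwise the value was left unchanged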

#endif

// Second inclusion undefines temporaries
#include "atomic_ops.h"