// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\klib\arm\cumem.cia
//
//

#include <kernel/klib.h>
#include <e32cia.h>
#include <arm.h>
#if defined(__REPLACE_GENERIC_UTILS)
#include "replacement_utils.h"
#endif
|
extern "C" {

#ifdef _DEBUG
#define CUMEM_FAULT(cc, reason) asm("mov"#cc" r0, #%a0 " : : "i" (reason)); \
                                asm("b"#cc" " CSM_ZN2KL5PanicENS_13TKernLibPanicE)
#endif

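/*
 * Copy aLength bytes from aAddr to aKernAddr, treating aAddr as a user
 * address if the previous mode was user mode. Unlike kumemget, this variant
 * performs no paging-safety assertion. (Summary inferred from the code
 * below; parameter names are those in the signature.)
 */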
|
__NAKED__ void kumemget_no_paging_assert(TAny* /*aKernAddr*/, const TAny* /*aAddr*/, TInt /*aLength*/)
    {
    asm("mrs r3, spsr ");           // r3=spsr_svc
    asm("tst r3, #0x0f ");          // test for user mode
    asm("bne memcpy ");             // if not, just do memcpy
#ifndef USE_REPLACEMENT_UMEMGET
    asm("b umemget_no_paging_assert");
#else
    asm("b umemget");
#endif
    }


#ifndef USE_REPLACEMENT_UMEMGET
|
#ifdef __CPU_ARMV6
// Conditional returns are not predicted on ARMv6, so branch to a single
// unconditional return sequence instead of returning with a conditional
// ldm into pc.
__NAKED__ void dummy_umemget32_exit()
    {
    asm("_umemget32_exit: ");
    asm("ldmfd sp!, {r4, pc} ");
    }
#define UMEMGET32_EXIT(cc) asm("b"#cc" _umemget32_exit")
#else
#define UMEMGET32_EXIT(cc) asm("ldm"#cc"fd sp!, {r4, pc}")
#endif

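/*
 * Copy aLength bytes from aAddr to aKernAddr, where aAddr is a user address
 * if the previous mode was user mode. aLength is assumed to be a multiple
 * of 4 (checked in debug builds by umemget32, which this falls through to).
 */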
|
EXPORT_C __NAKED__ void kumemget32(TAny* /*aKernAddr*/, const TAny* /*aAddr*/, TInt /*aLength*/)
    {
    asm("mrs r3, spsr ");           // r3=spsr_svc
    asm("tst r3, #0x0f ");          // test for user mode
    asm("bne wordmove ");           // if not, just do wordmove
    // otherwise fall through to umemget32
    }

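/*
 * Copy aLength bytes (a multiple of 4) from user address aUserAddr to
 * kernel address aKernAddr, using ldrt loads so that access is
 * permission-checked against user mode. Both addresses are assumed to be
 * word aligned. Copies in 32-byte blocks where possible, first aligning
 * the destination to a 32-byte boundary.
 */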
|
EXPORT_C __NAKED__ void umemget32(TAny* /*aKernAddr*/, const TAny* /*aUserAddr*/, TInt /*aLength*/)
    {
    ASM_ASSERT_PAGING_SAFE
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("stmfd sp!, {r11, lr} ");
    asm("subs r12, r2, #1");
    asm("ldrhsb r11, [r0]");        // test access to first byte of kernel memory
    asm("ldrhsb r11, [r0,r12]");    // test access to last byte of kernel memory
    USER_MEMORY_GUARD_OFF(,r11,r12);
    asm("bl 1f");
    USER_MEMORY_GUARD_RESTORE(r11,r12);
    asm("ldmfd sp!, {r11, pc} ");
    asm("1:");
#endif
    PLD(1);
#ifdef _DEBUG
    asm("tst r2, #3 ");             // check length is a whole number of words
    CUMEM_FAULT(ne, KL::EWordMoveLengthNotMultipleOf4);
#endif
    asm("_umemget_word_aligned: ");
    asm("stmfd sp!, {r4, lr} ");
    asm("subs ip, r2, #32 ");
    asm("blo _umemget32_small_copy ");
    PLD_ioff(1, 32);
    asm("beq _umemget32_32_byte_case ");    // 32 byte case is common - don't bother to align

    asm("rsb lr, r0, #32 ");        // align destination: 0 - 28 byte copy
    asm("movs lr, lr, lsl #27 ");
    asm("beq _umemget32_large_copy ");
    asm("sub r2, r2, lr, lsr #27 ");
    asm("msr cpsr_f, lr ");         // put length bits 4, 3, 2 into N, Z, C
    asm("ldrmit r3, [r1], #4 ");
    asm("ldrmit r4, [r1], #4 ");
    asm("ldrmit ip, [r1], #4 ");
    asm("ldrmit lr, [r1], #4 ");
    asm("stmmiia r0!, {r3, r4, ip, lr} ");
    asm("ldreqt r3, [r1], #4 ");
    asm("ldreqt r4, [r1], #4 ");
    asm("ldrcst ip, [r1], #4 ");
    asm("stmeqia r0!, {r3, r4} ");
    asm("strcs ip, [r0], #4 ");
    asm("subs ip, r2, #32 ");
    asm("blo _umemget32_small_copy ");

    asm("_umemget32_large_copy: ");         // copy 32 byte blocks
    PLD_ioff(1, 64);
    asm("_umemget32_32_byte_case: ");
    asm("ldrt r2, [r1], #4 ");
    asm("ldrt r3, [r1], #4 ");
    asm("ldrt r4, [r1], #4 ");
    asm("ldrt lr, [r1], #4 ");
    asm("subs ip, ip, #32 ");
    asm("stmia r0!, {r2, r3, r4, lr} ");
    asm("ldrt r2, [r1], #4 ");
    asm("ldrt r3, [r1], #4 ");
    asm("ldrt r4, [r1], #4 ");
    asm("ldrt lr, [r1], #4 ");
    asm("stmia r0!, {r2, r3, r4, lr} ");
    asm("bhs _umemget32_large_copy ");

    asm("_umemget32_small_copy: ");         // 0 - 31 byte copy, length in ip bits 0-4
    asm("movs r2, ip, lsl #27 ");
    UMEMGET32_EXIT(eq);
    asm("msr cpsr_f, r2 ");         // put length bits 4, 3, 2 into N, Z, C
    asm("ldrmit r3, [r1], #4 ");
    asm("ldrmit r4, [r1], #4 ");
    asm("ldrmit ip, [r1], #4 ");
    asm("ldrmit lr, [r1], #4 ");
    asm("stmmiia r0!, {r3, r4, ip, lr} ");
    asm("ldreqt r3, [r1], #4 ");
    asm("ldreqt r4, [r1], #4 ");
    asm("ldrcst ip, [r1], #4 ");
    asm("stmeqia r0!, {r3, r4} ");
    asm("strcs ip, [r0], #4 ");
    asm("movs r2, r2, lsl #3 ");
    UMEMGET32_EXIT(eq);
    asm("msr cpsr_f, r2 ");         // put length bits 1, 0 into N, Z
    asm("ldrmibt r3, [r1], #1 ");
    asm("ldrmibt r4, [r1], #1 ");
    asm("ldreqbt ip, [r1], #1 ");
    asm("strmib r3, [r0], #1 ");
    asm("strmib r4, [r0], #1 ");
    asm("streqb ip, [r0], #1 ");
    asm("ldmfd sp!, {r4, pc} ");
    }

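/*
 * Copy aLength bytes from aAddr to aKernAddr, where aAddr is a user address
 * if the previous mode was user mode; no alignment restrictions on either
 * address or on aLength.
 */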
|
EXPORT_C __NAKED__ void kumemget(TAny* /*aKernAddr*/, const TAny* /*aAddr*/, TInt /*aLength*/)
    {
    asm("mrs r3, spsr ");           // r3=spsr_svc
    asm("tst r3, #0x0f ");          // test for user mode
    asm("bne memcpy ");             // if not, just do memcpy
    // otherwise fall through to umemget
    }

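/*
 * Copy aLength bytes from user address aUserAddr to kernel address aKernAddr
 * with permission-checked (ldrt/ldrbt) loads. Short copies go byte by byte;
 * longer ones word-align the destination, then handle each source
 * misalignment (src mod 4 = 1, 2, 3) by shifting and merging adjacent words.
 */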
|
EXPORT_C __NAKED__ void umemget(TAny* /*aKernAddr*/, const TAny* /*aUserAddr*/, TInt /*aLength*/)
    {
    // Optimised for word-aligned transfers, as unaligned are very rare in practice

    ASM_ASSERT_PAGING_SAFE
    asm("umemget_no_paging_assert:");
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("stmfd sp!, {r11, lr} ");
    asm("subs r12, r2, #1");
    asm("ldrhsb r11, [r0]");        // test access to first byte of kernel memory
    asm("ldrhsb r11, [r0,r12]");    // test access to last byte of kernel memory
    USER_MEMORY_GUARD_OFF(,r11,r12);
    asm("bl 1f");
    USER_MEMORY_GUARD_RESTORE(r11,r12);
    asm("ldmfd sp!, {r11, pc} ");
    asm("1:");
#endif
    PLD(1);
    asm("tst r0, #3 ");
    asm("tsteq r1, #3 ");
    asm("beq _umemget_word_aligned ");
    asm("cmp r2, #8 ");
    asm("bhs 1f ");

    asm("2: ");                     // Copy 0 - 7 bytes
    asm("subs r2, r2, #1 ");
    asm("ldrplbt r3, [r1], #1 ");
    asm("strplb r3, [r0], #1 ");
    asm("bgt 2b ");
    __JUMP(,lr);

    asm("1: ");                     // Attempt to word align
    asm("movs r3, r0, lsl #30 ");
    asm("beq 5f ");
    asm("rsbs r3, r3, #0 ");        // 01->c0000000 (MI,VC) 10->80000000 (MI,VS) 11->40000000 (PL,VC)
    asm("sub r2, r2, r3, lsr #30 ");
    asm("ldrmibt r3, [r1], #1 ");
    asm("strmib r3, [r0], #1 ");
    asm("ldrmibt r3, [r1], #1 ");
    asm("strmib r3, [r0], #1 ");
    asm("ldrvcbt r3, [r1], #1 ");
    asm("strvcb r3, [r0], #1 ");    // r0 now word aligned
    asm("5: ");
    asm("movs r3, r1, lsl #31 ");
    asm("bic r1, r1, #3 ");
    asm("bcs 3f ");                 // branch if src mod 4 = 2 or 3
    asm("bpl _umemget_word_aligned ");      // branch if src mod 4 = 0

    asm("4: ");                     // src mod 4 = 1
    asm("subs r2, r2, #4 ");
    asm("ldrget r3, [r1], #4 ");
    asm("ldrget ip, [r1] ");
    asm("movge r3, r3, lsr #8 ");
    asm("orrge r3, r3, ip, lsl #24 ");
    asm("strge r3, [r0], #4 ");
    asm("bgt 4b ");
    asm("add r1, r1, #1 ");
    asm("b umemget_do_end ");

    asm("3: ");
    asm("bmi 5f ");
    asm("2: ");                     // src mod 4 = 2
    asm("subs r2, r2, #4 ");
    asm("ldrget r3, [r1], #4 ");
    asm("ldrget ip, [r1] ");
    asm("movge r3, r3, lsr #16 ");
    asm("orrge r3, r3, ip, lsl #16 ");
    asm("strge r3, [r0], #4 ");
    asm("bgt 2b ");
    asm("add r1, r1, #2 ");
    asm("b umemget_do_end ");

    asm("5: ");                     // src mod 4 = 3
    asm("subs r2, r2, #4 ");
    asm("ldrget r3, [r1], #4 ");
    asm("ldrget ip, [r1] ");
    asm("movge r3, r3, lsr #24 ");
    asm("orrge r3, r3, ip, lsl #8 ");
    asm("strge r3, [r0], #4 ");
    asm("bgt 5b ");
    asm("add r1, r1, #3 ");

    asm("umemget_do_end: ");        // z set if done, else r2 == length remaining - 4
    __JUMP(eq,lr);
    asm("adds r2, r2, #2 ");        // -1 if 1 left, 0 if 2 left, +1 if 3 left
    asm("ldrplbt r3, [r1], #1 ");
    asm("strplb r3, [r0], #1 ");
    asm("ldrplbt r3, [r1], #1 ");
    asm("strplb r3, [r0], #1 ");
    asm("ldrnebt r3, [r1], #1 ");
    asm("strneb r3, [r0], #1 ");
    __JUMP(,lr);
    }

#endif // USE_REPLACEMENT_UMEMGET

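/*
 * Copy aLength bytes from aKernAddr to aAddr, treating aAddr as a user
 * address if the previous mode was user mode. Unlike kumemput, this variant
 * performs no paging-safety assertion. (Summary inferred from the code below.)
 */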
|
__NAKED__ void kumemput_no_paging_assert(TAny* /*aAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
    {
    asm("mrs r3, spsr ");           // r3=spsr_svc
    asm("tst r3, #0x0f ");          // test for user mode
    asm("bne memcpy ");             // if not, just do memcpy
#ifndef USE_REPLACEMENT_UMEMPUT
    asm("b umemput_no_paging_assert");
#else
    asm("b umemput");
#endif
    }


#ifndef USE_REPLACEMENT_UMEMPUT
|
#ifdef __CPU_ARMV6
// Conditional returns are not predicted on ARMv6
__NAKED__ void dummy_umemput32_exit()
    {
    asm("_umemput32_exit: ");
    asm("ldmfd sp!, {r4, pc} ");
    }
#define UMEMPUT32_EXIT(cc) asm("b"#cc" _umemput32_exit")
#else
#define UMEMPUT32_EXIT(cc) asm("ldm"#cc"fd sp!, {r4, pc}")
#endif

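/*
 * Copy aLength bytes (a multiple of 4) from aKernAddr to aAddr, where aAddr
 * is a user address if the previous mode was user mode.
 */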
|
EXPORT_C __NAKED__ void kumemput32(TAny* /*aAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
    {
    asm("mrs r3, spsr ");           // r3=spsr_svc
    asm("tst r3, #0x0f ");          // test for user mode
    asm("bne wordmove ");           // if not, just do wordmove
    // otherwise fall through to umemput32
    }

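/*
 * Copy aLength bytes (a multiple of 4) from kernel address aKernAddr to
 * user address aUserAddr, using strt stores so that writes are
 * permission-checked against user mode. The common 4-byte case is handled
 * up front; larger copies align the source to a 32-byte boundary and move
 * 32-byte blocks.
 */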
|
EXPORT_C __NAKED__ void umemput32(TAny* /*aUserAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
    {
    ASM_ASSERT_DATA_PAGING_SAFE
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("stmfd sp!, {r11, lr} ");
    asm("subs r12, r2, #1");
    asm("ldrhsb r11, [r1]");        // test access to first byte of kernel memory
    asm("ldrhsb r11, [r1,r12]");    // test access to last byte of kernel memory
    USER_MEMORY_GUARD_OFF(,r11,r12);
    asm("bl 1f");
    USER_MEMORY_GUARD_RESTORE(r11,r12);
    asm("ldmfd sp!, {r11, pc} ");
    asm("1:");
#endif
    PLD(1);
#ifdef _DEBUG
    asm("tst r2, #3 ");             // check length is a whole number of words
    CUMEM_FAULT(ne, KL::EWordMoveLengthNotMultipleOf4);
#endif
    asm("cmp r2, #4 ");             // special case for 4 byte copy which is common
    asm("ldrhs r3, [r1], #4 ");
    asm("subhs r2, r2, #4 ");
    asm("strhst r3, [r0], #4 ");
    __JUMP(ls,lr);

    asm("_umemput_word_aligned: ");
    asm("stmfd sp!, {r4, lr} ");
    asm("subs r2, r2, #32 ");
    asm("bhs _umemput32_align_source ");

    asm("_umemput32_small_copy: ");         // copy 1 - 31 bytes
    asm("mov r2, r2, lsl #27 ");
    asm("msr cpsr_f, r2 ");         // put length bits 4, 3, 2 into N, Z, C
    asm("ldmmiia r1!, {r3, r4, ip, lr} ");
    asm("strmit r3, [r0], #4 ");
    asm("strmit r4, [r0], #4 ");
    asm("ldmeqia r1!, {r3, r4} ");
    asm("strmit ip, [r0], #4 ");
    asm("ldrcs ip, [r1], #4 ");
    asm("strmit lr, [r0], #4 ");
    asm("streqt r3, [r0], #4 ");
    asm("streqt r4, [r0], #4 ");
    asm("strcst ip, [r0], #4 ");
    asm("movs r2, r2, lsl #3 ");
    UMEMPUT32_EXIT(eq);
    asm("msr cpsr_f, r2 ");         // put length bits 1, 0 into N, Z
    asm("ldrmih r3, [r1], #2 ");
    asm("ldreqb r4, [r1], #1 ");
    asm("strmibt r3, [r0], #1 ");
    asm("movmi r3, r3, lsr #8 ");
    asm("strmibt r3, [r0], #1 ");
    asm("streqbt r4, [r0], #1 ");
    asm("ldmfd sp!, {r4, pc} ");

    asm("_umemput32_align_source: ");
    PLD_ioff(1, 32);
    asm("cmp r2, #32 ");
    asm("bls _umemput32_large_copy ");      // don't bother if length <= 64
    asm("rsb ip, r1, #32 ");
    asm("movs ip, ip, lsl #27 ");
    asm("beq _umemput32_large_copy ");
    asm("msr cpsr_f, ip ");         // put length bits 4, 3, 2 into N, Z, C
    asm("sub r2, r2, ip, lsr #27 ");
    asm("ldmmiia r1!, {r3, r4, ip, lr} ");
    asm("strmit r3, [r0], #4 ");
    asm("strmit r4, [r0], #4 ");
    asm("ldmeqia r1!, {r3, r4} ");
    asm("strmit ip, [r0], #4 ");
    asm("ldrcs ip, [r1], #4 ");
    asm("strmit lr, [r0], #4 ");
    asm("streqt r3, [r0], #4 ");
    asm("streqt r4, [r0], #4 ");
    asm("strcst ip, [r0], #4 ");

    asm("_umemput32_large_copy: ");         // copy 32 byte blocks
    PLD_ioff(1, 64);
    asm("ldmia r1!, {r3, r4, ip, lr} ");
    asm("strt r3, [r0], #4 ");
    asm("strt r4, [r0], #4 ");
    asm("strt ip, [r0], #4 ");
    asm("strt lr, [r0], #4 ");
    asm("ldmia r1!, {r3, r4, ip, lr} ");
    asm("strt r3, [r0], #4 ");
    asm("strt r4, [r0], #4 ");
    asm("strt ip, [r0], #4 ");
    asm("strt lr, [r0], #4 ");
    asm("subs r2, r2, #32 ");
    asm("bhs _umemput32_large_copy ");
    asm("adds r2, r2, #32 ");
    asm("bne _umemput32_small_copy ");
    asm("ldmfd sp!, {r4, pc} ");
    }

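/*
 * Copy aLength bytes (assumed a multiple of 4) between two user addresses,
 * using ldrt/strt so that both the loads and the stores are
 * permission-checked against user mode.
 */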
|
__NAKED__ void uumemcpy32(TAny* /*aUserDst*/, const TAny* /*aUserSrc*/, TInt /*aLength*/)
    {
    ASM_ASSERT_PAGING_SAFE
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("stmfd sp!, {r11, lr} ");
    USER_MEMORY_GUARD_OFF(,r11,r12);
    asm("bl 1f");
    USER_MEMORY_GUARD_RESTORE(r11,r12);
    asm("ldmfd sp!, {r11, pc} ");
    asm("1:");
#endif
    asm("1: ");
    asm("subs r2, r2, #4 ");
    asm("ldrplt r3, [r1], #4 ");
    asm("strplt r3, [r0], #4 ");
    asm("bpl 1b ");
    __JUMP(,lr);
    }

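/*
 * Copy aLength bytes between two user addresses with no alignment
 * restrictions. Same alignment strategy as umemget/umemput, but with
 * permission-checked accesses on both sides.
 */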
|
__NAKED__ void uumemcpy(TAny* /*aUserDst*/, const TAny* /*aUserSrc*/, TInt /*aLength*/)
    {
    ASM_ASSERT_PAGING_SAFE
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("stmfd sp!, {r11, lr} ");
    USER_MEMORY_GUARD_OFF(,r11,r12);
    asm("bl 1f");
    USER_MEMORY_GUARD_RESTORE(r11,r12);
    asm("ldmfd sp!, {r11, pc} ");
    asm("1:");
#endif
    asm("cmp r2, #8 ");
    asm("bcs 1f ");
    asm("2: ");                     // Copy 0 - 7 bytes
    asm("subs r2, r2, #1 ");
    asm("ldrplbt r3, [r1], #1 ");
    asm("strplbt r3, [r0], #1 ");
    asm("bgt 2b ");
    __JUMP(,lr);
    asm("1: ");                     // Word-align dest
    asm("movs r3, r0, lsl #30 ");
    asm("beq 5f ");
    asm("rsbs r3, r3, #0 ");        // 01->c0000000 (MI,VC) 10->80000000 (MI,VS) 11->40000000 (PL,VC)
    asm("sub r2, r2, r3, lsr #30 ");
    asm("ldrmibt r3, [r1], #1 ");
    asm("strmibt r3, [r0], #1 ");
    asm("ldrmibt r3, [r1], #1 ");
    asm("strmibt r3, [r0], #1 ");
    asm("ldrvcbt r3, [r1], #1 ");
    asm("strvcbt r3, [r0], #1 ");   // r0 now word aligned
    asm("5: ");
    asm("movs r3, r1, lsl #31 ");
    asm("bic r1, r1, #3 ");
    asm("bcs 3f ");                 // branch if src mod 4 = 2 or 3
    asm("bmi 4f ");                 // branch if src mod 4 = 1
    asm("2: ");                     // src mod 4 = 0
    asm("subs r2, r2, #4 ");
    asm("ldrget r3, [r1], #4 ");
    asm("strget r3, [r0], #4 ");
    asm("bgt 2b ");
    asm("uumemcpy_do_end: ");
    __JUMP(eq,lr);
    asm("adds r2, r2, #2 ");        // -1 if 1 left, 0 if 2 left, +1 if 3 left
    asm("ldrplbt r3, [r1], #1 ");
    asm("strplbt r3, [r0], #1 ");
    asm("ldrplbt r3, [r1], #1 ");
    asm("strplbt r3, [r0], #1 ");
    asm("ldrnebt r3, [r1], #1 ");
    asm("strnebt r3, [r0], #1 ");
    __JUMP(,lr);
    asm("3: ");                     // get here if src mod 4 = 2 or 3
    asm("bmi 5f ");                 // branch if 3
    asm("2: ");                     // src mod 4 = 2
    asm("subs r2, r2, #4 ");
    asm("ldrget r3, [r1], #4 ");
    asm("ldrget ip, [r1] ");
    asm("movge r3, r3, lsr #16 ");
    asm("orrge r3, r3, ip, lsl #16 ");
    asm("strget r3, [r0], #4 ");
    asm("bgt 2b ");
    asm("add r1, r1, #2 ");
    asm("b uumemcpy_do_end ");
    asm("5: ");                     // src mod 4 = 3
    asm("subs r2, r2, #4 ");
    asm("ldrget r3, [r1], #4 ");
    asm("ldrget ip, [r1] ");
    asm("movge r3, r3, lsr #24 ");
    asm("orrge r3, r3, ip, lsl #8 ");
    asm("strget r3, [r0], #4 ");
    asm("bgt 5b ");
    asm("add r1, r1, #3 ");
    asm("b uumemcpy_do_end ");
    asm("4: ");                     // src mod 4 = 1
    asm("subs r2, r2, #4 ");
    asm("ldrget r3, [r1], #4 ");
    asm("ldrget ip, [r1] ");
    asm("movge r3, r3, lsr #8 ");
    asm("orrge r3, r3, ip, lsl #24 ");
    asm("strget r3, [r0], #4 ");
    asm("bgt 4b ");
    asm("add r1, r1, #1 ");
    asm("b uumemcpy_do_end ");
    }

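/*
 * Copy aLength bytes from aKernAddr to aAddr, where aAddr is a user address
 * if the previous mode was user mode; no alignment restrictions.
 */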
|
EXPORT_C __NAKED__ void kumemput(TAny* /*aAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
    {
    asm("mrs r3, spsr ");           // r3=spsr_svc
    asm("tst r3, #0x0f ");          // test for user mode
    asm("bne memcpy ");             // if not, just do memcpy
    // otherwise fall through to umemput
    }

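/*
 * Copy aLength bytes from kernel address aKernAddr to user address
 * aUserAddr with permission-checked (strt/strbt) stores, handling arbitrary
 * alignment with the same shift-and-merge scheme as umemget.
 */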
|
EXPORT_C __NAKED__ void umemput(TAny* /*aUserAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
    {
    // Optimised for word-aligned transfers, as unaligned are very rare in practice

    ASM_ASSERT_DATA_PAGING_SAFE
    asm("umemput_no_paging_assert:");
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("stmfd sp!, {r11, lr} ");
    asm("subs r12, r2, #1");
    asm("ldrhsb r11, [r1]");        // test access to first byte of kernel memory
    asm("ldrhsb r11, [r1,r12]");    // test access to last byte of kernel memory
    USER_MEMORY_GUARD_OFF(,r11,r12);
    asm("bl 1f");
    USER_MEMORY_GUARD_RESTORE(r11,r12);
    asm("ldmfd sp!, {r11, pc} ");
    asm("1:");
#endif
    PLD(1);
    asm("tst r0, #3 ");
    asm("tsteq r1, #3 ");
    asm("beq _umemput_word_aligned ");

    asm("cmp r2, #8 ");
    asm("bcs 1f ");
    asm("2: ");                     // Copy 0 - 7 bytes
    asm("subs r2, r2, #1 ");
    asm("ldrplb r3, [r1], #1 ");
    asm("strplbt r3, [r0], #1 ");
    asm("bgt 2b ");
    __JUMP(,lr);

    asm("1: ");                     // Word-align dest
    asm("movs r3, r0, lsl #30 ");
    asm("beq 5f ");
    asm("rsbs r3, r3, #0 ");        // 01->c0000000 (MI,VC) 10->80000000 (MI,VS) 11->40000000 (PL,VC)
    asm("sub r2, r2, r3, lsr #30 ");
    asm("ldrmib r3, [r1], #1 ");
    asm("strmibt r3, [r0], #1 ");
    asm("ldrmib r3, [r1], #1 ");
    asm("strmibt r3, [r0], #1 ");
    asm("ldrvcb r3, [r1], #1 ");
    asm("strvcbt r3, [r0], #1 ");   // r0 now word aligned
    asm("5: ");
    asm("movs r3, r1, lsl #31 ");
    asm("bic r1, r1, #3 ");
    asm("bcs 3f ");                 // branch if src mod 4 = 2 or 3
    asm("bpl _umemput_word_aligned ");      // branch if src mod 4 = 0

    asm("4: ");                     // get here if src mod 4 = 1
    asm("subs r2, r2, #4 ");
    asm("ldrge r3, [r1], #4 ");
    asm("ldrge ip, [r1] ");
    asm("movge r3, r3, lsr #8 ");
    asm("orrge r3, r3, ip, lsl #24 ");
    asm("strget r3, [r0], #4 ");
    asm("bgt 4b ");
    asm("add r1, r1, #1 ");
    asm("b _umemput_do_end ");

    asm("3: ");                     // get here if src mod 4 = 2 or 3
    asm("bmi 5f ");                 // branch if 3
    asm("2: ");                     // get here if src mod 4 = 2
    asm("subs r2, r2, #4 ");
    asm("ldrge r3, [r1], #4 ");
    asm("ldrge ip, [r1] ");
    asm("movge r3, r3, lsr #16 ");
    asm("orrge r3, r3, ip, lsl #16 ");
    asm("strget r3, [r0], #4 ");
    asm("bgt 2b ");
    asm("add r1, r1, #2 ");
    asm("b _umemput_do_end ");

    asm("5: ");                     // get here if src mod 4 = 3
    asm("subs r2, r2, #4 ");
    asm("ldrge r3, [r1], #4 ");
    asm("ldrge ip, [r1] ");
    asm("movge r3, r3, lsr #24 ");
    asm("orrge r3, r3, ip, lsl #8 ");
    asm("strget r3, [r0], #4 ");
    asm("bgt 5b ");
    asm("add r1, r1, #3 ");

    asm("_umemput_do_end: ");       // z set if done, else r2 == length remaining - 4
    __JUMP(eq,lr);
    asm("adds r2, r2, #2 ");        // r2 = -1 if 1 left, 0 if 2 left, +1 if 3 left
    asm("ldrplb r3, [r1], #1 ");
    asm("strplbt r3, [r0], #1 ");
    asm("ldrplb r3, [r1], #1 ");
    asm("strplbt r3, [r0], #1 ");
    asm("ldrneb r3, [r1], #1 ");
    asm("strnebt r3, [r0], #1 ");
    __JUMP(,lr);
    }

#endif // USE_REPLACEMENT_UMEMPUT

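/*
 * Fill aLength bytes at aAddr with aValue, where aAddr is a user address if
 * the previous mode was user mode.
 */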
|
EXPORT_C __NAKED__ void kumemset(TAny* /*aAddr*/, const TUint8 /*aValue*/, TInt /*aLength*/)
    {
    asm("mrs r3, spsr ");           // r3=spsr_svc
    asm("tst r3, #0x0f ");          // test for user mode
    asm("bne memset ");             // if not, just do memset
    // otherwise fall through to umemset
    }

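/*
 * Fill aLength bytes at user address aUserAddr with aValue, using
 * permission-checked (strbt/strt) stores. Short fills go byte by byte;
 * longer fills replicate the byte across a word, word-align the destination
 * and store a word at a time.
 */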
|
EXPORT_C __NAKED__ void umemset(TAny* /*aUserAddr*/, const TUint8 /*aValue*/, TInt /*aLength*/)
    {
    ASM_ASSERT_DATA_PAGING_SAFE
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("stmfd sp!, {r11, lr} ");
    USER_MEMORY_GUARD_OFF(,r11,r12);
    asm("bl 1f");
    USER_MEMORY_GUARD_RESTORE(r11,r12);
    asm("ldmfd sp!, {r11, pc} ");
    asm("1:");
#endif
    asm("cmp r2, #7 ");
    asm("bhi 2f ");
    asm("1: ");                     // Fill 0 - 7 bytes
    asm("subs r2, r2, #1 ");
    asm("strplbt r1, [r0], #1 ");
    asm("bgt 1b ");
    __JUMP(,lr);
    asm("2: ");
    asm("and r1, r1, #0xff ");      // replicate fill byte across the word
    asm("orr r1, r1, r1, lsl #8 ");
    asm("orr r1, r1, r1, lsl #16 ");
    asm("movs r3, r0, lsl #30 ");
    asm("beq 3f ");
    asm("rsbs r3, r3, #0 ");        // 01->c0000000 (MI,VC) 10->80000000 (MI,VS) 11->40000000 (PL,VC)
    asm("strmibt r1, [r0], #1 ");   // if 01 or 10 do 2 byte stores
    asm("strmibt r1, [r0], #1 ");
    asm("strvcbt r1, [r0], #1 ");   // if 01 or 11 do 1 byte store
    asm("sub r2, r2, r3, lsr #30 ");
    asm("3: ");                     // r0 now word aligned
    asm("subs r2, r2, #4 ");
    asm("strplt r1, [r0], #4 ");
    asm("bgt 3b ");
    __JUMP(eq,lr);                  // return if finished
    asm("adds r2, r2, #2 ");        // -1 if 1 left, 0 if 2 left, +1 if 3 left
    asm("strplbt r1, [r0], #1 ");
    asm("strplbt r1, [r0], #1 ");
    asm("strnebt r1, [r0], #1 ");
    __JUMP(,lr);
    }

}   // extern "C"