// Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\arm\cmem.cia
//
//

#include "../common.h"
#include <e32cia.h>
#if defined(__REPLACE_GENERIC_UTILS)
#include "replacement_utils.h"
#endif

#if defined(__MEM_MACHINE_CODED__)

#ifndef USE_REPLACEMENT_MEMSET

#if defined(_DEBUG)

#ifdef __STANDALONE_NANOKERNEL__

#define ARM_ASSERT_MULTIPLE_OF_FOUR(rt1, panicfunc) \
    asm("tst "#rt1", #3"); \
    asm("ldrne "#rt1", ["#rt1"]")

#else // __STANDALONE_NANOKERNEL__
GLDEF_C void PanicEWordMoveLengthNotMultipleOf4();
GLDEF_C void PanicEWordMoveSourceNotAligned();
GLDEF_C void PanicEWordMoveTargetNotAligned();

#define ARM_ASSERT_MULTIPLE_OF_FOUR(rt1, panicfunc) \
    asm("tst "#rt1", #3"); \
    asm("bne " panicfunc )

#endif // __STANDALONE_NANOKERNEL__

#else // _DEBUG

#define ARM_ASSERT_MULTIPLE_OF_FOUR(rt1, panicfunc)

#endif //_DEBUG


// See header file e32cmn.h for the in-source documentation.
extern "C" EXPORT_C __NAKED__ TAny* memclr(TAny* /*aTrg*/, unsigned int /*aLength*/)
    {
    KMEMCLRHOOK
    asm("mov r2, #0 ");
    asm("b fill ");
    }

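// memclr() above simply forces the fill value (r2) to zero and branches into the
// shared "fill" code used by memset() below, so it behaves exactly like memset()
// with a zero value. A minimal usage sketch (illustration only, not compiled;
// the buffer name is hypothetical):
#if 0
    TUint8 buffer[64];
    memclr(buffer, sizeof(buffer));     // r2 forced to 0, then falls into fill
    memset(buffer, 0, sizeof(buffer));  // same result via the generic entry point
#endif
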
// See header file e32cmn.h for the in-source documentation.
extern "C" EXPORT_C __NAKED__ TAny* memset(TAny* /*aTrg*/, TInt /*aValue*/, unsigned int /*aLength*/)
    {
    KMEMSETHOOK
    asm(" mov r3, r2 ");                /* length into r3 */
    asm(" and r2,r1,#255");             /* fill value into r2 */
    asm(" mov r1, r3 ");                /* length into r1 */

    asm("fill:");
    asm(" cmp r1,#8");
    asm(" bls small_fill");             // only taken ~20% of the time

    asm(" stmfd sp!,{r0,r4-r9,lr}");
    asm(" movs r3, r0, lsl #30 ");      // Check if word aligned
    asm(" orr r2,r2,r2,lsl #8");
    asm(" orr r2,r2,r2,lsl #16");
    asm(" bne unaligned_fill ");

    // Align destination address to 32 byte boundary if possible

    asm("word_aligned_fill: ");
    asm(" mov r4,r2");
    asm(" mov r5,r2");
    asm(" mov r6,r2");
    asm(" movs r3, r0, lsl #27 ");
    asm(" beq aligned_fill ");
    asm(" rsb r3, r3, #0 ");            // calculate fill length necessary for alignment
    asm(" cmp r1, r3, lsr #27 ");       // compare with remaining length
    asm(" blo smaller_fill ");          // skip alignment if the remaining length is too small
    asm(" msr cpsr_f, r3 ");            // put length bits 4, 3, 2 into N, Z, C flags
    asm(" strcs r2, [r0], #4 ");        // align to 8 byte boundary
    asm(" stmeqia r0!, {r2, r4} ");     // align to 16 byte boundary
    asm(" stmmiia r0!, {r2, r4-r6} ");  // align to 32 byte boundary
    asm(" sub r1, r1, r3, lsr #27 ");   // adjust remaining length

    asm("aligned_fill:");
    asm(" cmp r1, #64 ");
    asm(" bhs big_fill ");

    // Fill 0-63 bytes

    asm("smaller_fill:");
    asm(" movs r1, r1, lsl #26");
    asm(" beq mem_fill_end ");
    asm(" msr cpsr_flg, r1 ");
    asm(" stmmiia r0!,{r2,r4-r6}");     // Fill 32
    asm(" stmmiia r0!,{r2,r4-r6}");
    asm(" stmeqia r0!,{r2,r4-r6}");     // Fill 16
    asm(" stmcsia r0!,{r2,r4}");        // Fill 8
    asm(" strvs r2,[r0],#4");           // Fill 4
    asm(" movs r1, r1, lsl #4 ");
    asm(" bne smallest_fill ");
    asm("mem_fill_end: ");
    __POPRET("r0,r4-r9,");

    // Fill last 1-3 bytes

    asm("smallest_fill: ");
    asm(" msr cpsr_flg,r1");
    asm(" strmih r2,[r0],#2");          // Fill 2
    asm(" streqb r2,[r0],#1");          // Fill 1
    __POPRET("r0,r4-r9,");

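    // The smaller_fill/smallest_fill path above moves bits 5..0 of the remaining
    // length into the CPSR condition flags ("msr cpsr_flg") so that each conditional
    // STM/STR/STRH/STRB only executes when its block size is needed. A minimal C
    // sketch of the same decomposition (illustration only, not compiled; the helper
    // name is hypothetical):
#if 0
static TUint8* FillTail(TUint8* p, TUint8 b, TUint n)   // n < 64
    {
    if (n & 32) { memset(p, b, 32); p += 32; }  // the stmmiia pair (N flag)
    if (n & 16) { memset(p, b, 16); p += 16; }  // stmeqia (Z flag)
    if (n & 8)  { memset(p, b, 8);  p += 8;  }  // stmcsia (C flag)
    if (n & 4)  { memset(p, b, 4);  p += 4;  }  // strvs (V flag)
    if (n & 2)  { *p++ = b; *p++ = b; }         // strmih after the second shift
    if (n & 1)  { *p++ = b; }                   // streqb after the second shift
    return p;
    }
#endif
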
    // Fill loop for length >= 64

    asm("big_fill: ");
    asm(" mov r3,r2");
    asm(" mov r7,r2");
    asm(" mov r8,r2");
    asm(" mov r9,r2");
    asm(" movs ip,r1,lsr #8");          // Number of 256 byte blocks to fill
    asm(" beq medium_fill ");
    asm("fill_256_bytes_loop:");
    asm(" stmia r0!,{r2-r9}");          // Fill 256 bytes
    asm(" stmia r0!,{r2-r9}");
    asm(" stmia r0!,{r2-r9}");
    asm(" stmia r0!,{r2-r9}");
    asm(" stmia r0!,{r2-r9}");
    asm(" stmia r0!,{r2-r9}");
    asm(" stmia r0!,{r2-r9}");
    asm(" stmia r0!,{r2-r9}");
    asm(" subs ip,ip,#1");
    asm(" bne fill_256_bytes_loop");
    asm("medium_fill: ");
    asm(" movs ip,r1,lsl #24");
    asm(" msr cpsr_flg,ip");
    asm(" stmmiia r0!,{r2-r9}");        // Fill 128
    asm(" stmmiia r0!,{r2-r9}");
    asm(" stmmiia r0!,{r2-r9}");
    asm(" stmmiia r0!,{r2-r9}");
    asm(" stmeqia r0!,{r2-r9}");        // Fill 64
    asm(" stmeqia r0!,{r2-r9}");
    asm(" and r1, r1, #63 ");
    asm(" b smaller_fill");

    // Word-align destination address, length >= 8

    asm("unaligned_fill: ");
    asm(" rsb r3, r3, #0 ");            // calculate fill length necessary for alignment
    asm(" msr cpsr_flg, r3");
    asm(" streqb r2, [r0], #1 ");       // align to 2 byte boundary
    asm(" strmih r2, [r0], #2 ");       // align to 4 byte boundary
    asm(" sub r1, r1, r3, lsr #30 ");
    asm(" b word_aligned_fill ");

    // Fill for length <= 8

    asm("small_fill: ");
    asm(" mov r3, r0 ");                /* r3=dest */
    asm(" adr ip, small_fill_end ");
    asm(" sub pc, ip, r1, lsl #2 ");
    asm(" strb r2, [r3], #1");
    asm(" strb r2, [r3], #1");
    asm(" strb r2, [r3], #1");
    asm(" strb r2, [r3], #1");
    asm(" strb r2, [r3], #1");
    asm(" strb r2, [r3], #1");
    asm(" strb r2, [r3], #1");
    asm(" strb r2, [r3], #1");
    asm("small_fill_end: ");
    __JUMP(,lr);

    }

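// small_fill above uses a computed branch: "sub pc, ip, r1, lsl #2" jumps aLength
// instructions back from small_fill_end, so exactly aLength of the eight STRBs
// execute before control falls off the end. In C the same effect is a switch with
// deliberate fall-through (illustration only, not compiled; the helper name is
// hypothetical):
#if 0
static void SmallFill(TUint8* p, TUint8 b, TUint n)     // n <= 8
    {
    switch (n)
        {
        case 8: *p++ = b;   // fall through
        case 7: *p++ = b;   // fall through
        case 6: *p++ = b;   // fall through
        case 5: *p++ = b;   // fall through
        case 4: *p++ = b;   // fall through
        case 3: *p++ = b;   // fall through
        case 2: *p++ = b;   // fall through
        case 1: *p++ = b;   // fall through
        case 0: break;
        }
    }
#endif
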
#endif // USE_REPLACEMENT_MEMSET

#ifndef USE_REPLACEMENT_MEMCPY

// See header file e32cmn.h for the in-source documentation.

extern "C" EXPORT_C __NAKED__ TAny* wordmove(TAny* /*aTrg*/, const TAny* /*aSrc*/, unsigned int /*aLength*/)
//
// Assumes all is aligned
//
    {
    ARM_ASSERT_MULTIPLE_OF_FOUR(r0, CSM_Z30PanicEWordMoveTargetNotAlignedv);
    ARM_ASSERT_MULTIPLE_OF_FOUR(r1, CSM_Z30PanicEWordMoveSourceNotAlignedv);
    ARM_ASSERT_MULTIPLE_OF_FOUR(r2, CSM_Z34PanicEWordMoveLengthNotMultipleOf4v);

    // Mask length to a multiple of four bytes to avoid memory or register
    // corruption by the special cases below.
    asm("bic r2,r2,#3");

    // Length <= 24 in ~90% of cases; however, more than 16 bytes can only be copied
    // in 4 instructions if the LDM instruction restores Thumb state when loading the PC.
#ifdef __CPU_ARM_LDR_PC_SETS_TBIT
    asm("cmp r2, #24 ");
#else
    asm("cmp r2, #16 ");
#endif
    PLD(1);
    asm("addls pc, pc, r2, lsl #2 ");   // take branch depending on size
    asm("b 9f ");                       // too big

    // 0 words
    __JUMP(,lr);
    __JUMP(,lr);
    __JUMP(,lr);
    __JUMP(,lr);

    // 1 word
    asm("ldr ip, [r1] ");
    asm("str ip, [r0] ");
    __JUMP(,lr);
    __JUMP(,lr);

    // 2 words
    asm("ldmia r1, {r2,r3}");
    asm("stmia r0, {r2,r3}");
    __JUMP(,lr);
    __JUMP(,lr);

    // 3 words
    asm("ldmia r1, {r2,r3,ip}");
    asm("stmia r0, {r2,r3,ip}");
    __JUMP(,lr);
    __JUMP(,lr);

    // 4 words
    asm("ldmia r1, {r1,r2,r3,ip}");
    asm("stmia r0, {r1,r2,r3,ip}");
    __JUMP(,lr);
    __JUMP(,lr);

#ifdef __CPU_ARM_LDR_PC_SETS_TBIT
    // 5 words
    asm("stmfd sp!, {lr}");
    asm("ldmia r1, {r1,r2,r3,ip,lr}");
    asm("stmia r0, {r1,r2,r3,ip,lr}");
    asm("ldmfd sp!, {pc}");

    // 6 words
    asm("stmfd sp!, {r4,lr}");
    asm("ldmia r1, {r1,r2,r3,r4,ip,lr}");
    asm("stmia r0, {r1,r2,r3,r4,ip,lr}");
    asm("ldmfd sp!, {r4,pc}");
#endif

    asm("9: ");
    asm("subs r3, r0, r1 ");            // r3 = dest - source
    __JUMP(eq,lr);                      // return if source = dest
    asm("stmfd sp!, {r0,r4-r11,lr} ");
    asm("cmphi r2, r3 ");               // if dest>source, compare length with dest-source
    asm("bls mem_move_fore ");          // if dest<source or length<=dest-source do forwards aligned copy
    asm("add r0, r0, r2 ");
    asm("add r1, r1, r2 ");
    asm("b mem_move_back ");            // Backwards aligned copy
    }


// See header file e32cmn.h for the in-source documentation.
extern "C" EXPORT_C __NAKED__ TAny* memmove(TAny* /*aTrg*/, const TAny* /*aSrc*/, unsigned int /*aLength*/)
    {
    KMEMMOVEHOOK
    // fall through
    }


// See header file e32cmn.h for the in-source documentation.
extern "C" EXPORT_C __NAKED__ TAny* memcpy(TAny* /*aTrg*/, const TAny* /*aSrc*/, unsigned int /*aLength*/)
    {
    KMEMCPYHOOK
    //
    // Check for zero length or source and target being the same
    //
    asm(" cmp r2, #0 ");                // zero length?
    asm(" subnes r3, r0, r1 ");         // if not, r3 = dest-source
    __JUMP(eq,lr);                      // if zero length or dest=source, nothing to do
    asm(" cmphi r2, r3 ");              // if dest>source compare length to dest-source
    asm(" movhi r3, #0 ");              // if dest>source and length>dest-source need to go backwards - set r3=0
    //
    // If <16 bytes, just do byte moves
    //
    asm(" cmp r2, #15 ");
    asm(" bhi main_copy ");

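    // The overlap test above decides the copy direction: a backwards copy is only
    // needed when the destination starts inside the source region, i.e. dest > src
    // and the gap between them is smaller than the length (r3 is cleared to flag
    // this). The same test in C (illustration only, not compiled; the helper name
    // is hypothetical):
#if 0
static TBool MustCopyBackwards(const TUint8* aTrg, const TUint8* aSrc, TUint aLength)
    {
    return aTrg > aSrc && (TUint)(aTrg - aSrc) < aLength;
    }
#endif
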
    asm(" ldrb r12, [r0] ");            // read dest so it's in cache - avoid lots of single accesses to external memory
    asm(" sub r12, r0, #1 ");
    asm(" ldrb r12, [r12, r2] ");       // read dest+length-1
    asm(" cmp r3, #0 ");
    asm(" beq small_copy_back ");       // r3=0 means go backwards

    asm("small_copy_fwd: ");
    asm(" mov r3, r0 ");
    asm(" adr r12, small_copy_fwd_end ");
    asm(" sub pc, r12, r2, lsl #3 ");

    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm(" ldrb r12, [r1], #1 ");
    asm(" strb r12, [r3], #1 ");
    asm("small_copy_fwd_end: ");
    __JUMP(,lr);

    asm("small_copy_back: ");
    asm(" add r3, r0, r2 ");
    asm(" add r1, r1, r2 ");
    asm(" adr r12, small_copy_back_end ");
    asm(" sub pc, r12, r2, lsl #3 ");

    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm(" ldrb r12, [r1, #-1]! ");
    asm(" strb r12, [r3, #-1]! ");
    asm("small_copy_back_end: ");
    __JUMP(,lr);


    asm("main_copy: ");
    PLD(1);                             // preload first two cache lines
    PLD_ioff(1, 32);
    asm(" stmfd sp!, {r0,r4-r11,lr} "); // r0 == dest, r1 == src, r2 == len
    asm(" cmp r3, #0 ");
    asm(" beq copy_back ");             // we must go backwards
    asm(" movs r3, r0, lsl #30 ");      // check destination word aligned
    asm(" bne dest_unaligned_fore ");

    //
    // Normal copy forwards. r0 should point to end address on exit
    // Destination now word-aligned; if source is also word-aligned, do aligned copy.
    //
    asm("dest_aligned_fore: ");
    asm(" ands r12, r1, #3 ");          // r12=alignment of source
    asm(" bne copy_fwd_nonaligned ");

    //
    // We are now word aligned, at least 13 bytes to do
    //

    asm("mem_move_fore:");
    //
    // superalign
    //
    asm(" movs r4, r0, lsl #27 ");      // destination alignment into r4
    asm(" beq f_al_already_aligned ");  // fast path
    asm(" rsb r4, r4, #0 ");            // bytes required to align destination to 32
    asm(" cmp r2, r4, lsr #27 ");       // check that many remaining
    asm(" blo its_smaller_fore ");      // if too short, just stick with word alignment
    asm(" msr cpsr_flg, r4 ");          // destination alignment into N, Z, C flags
    // do word moves to align destination
    asm(" ldrcs lr, [r1], #4 ");        // C flag == 1 word (we are already word aligned)
    asm(" ldmeqia r1!, {r3,r9} ");      // Z flag == 2 words
    asm(" ldmmiia r1!, {r5-r8} ");      // N flag == 4 words, destination now 32 byte aligned
    asm(" sub r2, r2, r4, lsr #27 ");   // adjust length
    asm(" strcs lr, [r0], #4 ");        // destination now 8 byte aligned
    asm(" stmeqia r0!, {r3,r9} ");      // destination now 16 byte aligned
    asm(" stmmiia r0!, {r5-r8} ");      // destination now 32 byte aligned

    asm("f_al_already_aligned: ");
    asm(" cmp r2, #64 ");
    asm(" bhs large_copy_fore ");
    //
    // Less than 64 bytes to go...
    //
    asm("its_smaller_fore:");
    asm(" movs ip, r2, lsl #26 ");      // length bits 5, 4, 3, 2 into N, Z, C, V
    asm(" beq mem_copy_end ");          // skip if remaining length zero
    asm(" msr cpsr_flg, ip ");
    asm(" ldmmiia r1!, {r3-r10} ");
    asm(" stmmiia r0!, {r3-r10} ");     // copy 32
    asm(" ldmeqia r1!, {r3-r6} ");
    asm(" ldmcsia r1!, {r7-r8} ");
    asm(" ldrvs r9, [r1], #4 ");
    asm(" stmeqia r0!, {r3-r6} ");      // copy 16
    asm(" stmcsia r0!, {r7-r8} ");      // copy 8
    asm(" strvs r9, [r0], #4 ");        // copy 4

    asm(" movs ip, r2, lsl #30 ");
    asm(" bne smallest_copy_fore ");

    asm("mem_copy_end: ");
    __POPRET("r0,r4-r11,");


    //
    // Less than 4 bytes to go...
    //

    asm("smallest_copy_fore: ");
    asm(" msr cpsr_flg, ip ");
    asm(" ldrmih r3, [r1], #2 ");
    asm(" ldreqb r4, [r1], #1 ");
    asm(" strmih r3, [r0], #2 ");       // copy 2
    asm(" streqb r4, [r0], #1 ");       // copy 1
    __POPRET("r0,r4-r11,");


    //
    // Do byte moves if necessary to word-align destination
    //
    asm("dest_unaligned_fore: ");
    asm(" rsb r3, r3, #0 ");
    asm(" msr cpsr_flg, r3 ");
    asm(" ldrmib r4, [r1], #1 ");       // move bytes to align destination
    asm(" ldrmib r5, [r1], #1 ");
    asm(" ldreqb r6, [r1], #1 ");
    asm(" sub r2, r2, r3, lsr #30 ");   // adjust length, at least 13 bytes remaining
    asm(" strmib r4, [r0], #1 ");
    asm(" strmib r5, [r0], #1 ");
    asm(" streqb r6, [r0], #1 ");
    asm(" b dest_aligned_fore ");


    //
    // Large copy, length >= 64
    //

    asm("large_copy_fore: ");
    asm(" movs ip, r2, lsr #6 ");       // ip = number of 64 blocks to copy
    asm("1: ");
    PLD_ioff(1, 32);
    PLD_ioff(1, 64);
    asm(" ldmia r1!, {r3-r10} ");       // Copy 64
    asm(" stmia r0!, {r3-r10} ");
    asm(" ldmia r1!, {r3-r10} ");
    asm(" subs ip, ip, #1 ");
    asm(" stmia r0!, {r3-r10} ");
    asm(" bne 1b ");
    asm(" and r2, r2, #63 ");
    asm(" b its_smaller_fore ");


    //
    // Forward unaligned copy
    //

    asm("copy_fwd_nonaligned:");
    //
    // superalign
    //
    asm(" bic r1, r1, #3 ");            // align source
    asm(" ldr r11, [r1], #4 ");         // get first word
    asm(" mov r12, r12, lsl #3 ");      // r12 = 8*source alignment
    asm(" ands r4, r0, #31 ");          // destination alignment into r4
    asm(" beq medium_unal_copy ");      // skip if already aligned
    asm(" rsb r4, r4, #32 ");           // r4 = bytes to align dest to 32
    asm(" cmp r2, r4 ");                // check if length big enough to align to 32
    asm(" blo copy_fwd_remainder ");    // skip if too small
    asm(" sub r2, r2, r4 ");            // adjust length
    asm(" rsb r3, r12, #32 ");          // r3 = 32 - 8*source alignment

    asm("1: ");
    asm(" mov r5, r11, lsr r12 ");      // r5 = part of previous source word required to make destination word
    asm(" ldr r11, [r1], #4 ");         // get next word
    asm(" subs r4, r4, #4 ");           // 4 bytes less to do
    asm(" orr r5, r5, r11, lsl r3 ");   // form next destination word
    asm(" str r5, [r0], #4 ");          // and store it
    asm(" bne 1b ");                    // loop until destination 32 byte aligned

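    // copy_fwd_nonaligned reads the source with aligned loads only: it keeps the
    // previous aligned word and builds each destination word from its top bytes and
    // the bottom bytes of the next word (r12 = 8*misalignment, r3 = 32 - r12).
    // A little-endian C sketch of that shift-and-merge step (illustration only, not
    // compiled; names are hypothetical; shift is 8, 16 or 24 on this path):
#if 0
static void UnalignedWordCopy(TUint32* aTrg, const TUint8* aSrc, TInt aWords)
    {
    TUint shift = 8 * ((TUint)aSrc & 3);                        // r12 in the code above
    const TUint32* s = (const TUint32*)((TUint)aSrc & ~3u);     // bic r1, r1, #3
    TUint32 prev = *s++;                                        // first aligned word
    while (aWords-- > 0)
        {
        TUint32 next = *s++;
        *aTrg++ = (prev >> shift) | (next << (32 - shift));     // lsr r12 / lsl r3
        prev = next;
        }
    }
#endif
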
    asm("medium_unal_copy: ");          // destination now aligned to 32 bytes
    asm(" movs lr, r2, lsr #5 ");       // lr=number of 32-byte blocks
    asm(" beq copy_fwd_remainder ");    // skip if length < 32

    asm(" cmp r12, #16 ");
    asm(" beq copy_fwd_nonaligned_2 "); // branch if source = 2 mod 4
    asm(" bhi copy_fwd_nonaligned_3 "); // branch if source = 3 mod 4, else source = 1 mod 4

    // source = 1 mod 4
    asm("copy_fwd_nonaligned_1: ");
    asm(" mov r3, r11, lsr #8 ");
    asm(" ldmia r1!, {r4-r11} ");
    PLD_ioff(1, 32);
    asm(" subs lr, lr, #1 ");
    asm(" orr r3, r3, r4, lsl #24 ");
    asm(" mov r4, r4, lsr #8 ");
    asm(" orr r4, r4, r5, lsl #24 ");
    asm(" mov r5, r5, lsr #8 ");
    asm(" orr r5, r5, r6, lsl #24 ");
    asm(" mov r6, r6, lsr #8 ");
    asm(" orr r6, r6, r7, lsl #24 ");
    asm(" mov r7, r7, lsr #8 ");
    asm(" orr r7, r7, r8, lsl #24 ");
    asm(" mov r8, r8, lsr #8 ");
    asm(" orr r8, r8, r9, lsl #24 ");
    asm(" mov r9, r9, lsr #8 ");
    asm(" orr r9, r9, r10, lsl #24 ");
    asm(" mov r10, r10, lsr #8 ");
    asm(" orr r10, r10, r11, lsl #24 ");
    asm(" stmia r0!, {r3-r10} ");
    asm(" bne copy_fwd_nonaligned_1 ");
    asm(" b copy_fwd_remainder ");

    // source = 2 mod 4
    asm("copy_fwd_nonaligned_2: ");
    asm(" mov r3, r11, lsr #16 ");
    asm(" ldmia r1!, {r4-r11} ");
    PLD_ioff(1, 32);
    asm(" subs lr, lr, #1 ");
    asm(" orr r3, r3, r4, lsl #16 ");
    asm(" mov r4, r4, lsr #16 ");
    asm(" orr r4, r4, r5, lsl #16 ");
    asm(" mov r5, r5, lsr #16 ");
    asm(" orr r5, r5, r6, lsl #16 ");
    asm(" mov r6, r6, lsr #16 ");
    asm(" orr r6, r6, r7, lsl #16 ");
    asm(" mov r7, r7, lsr #16 ");
    asm(" orr r7, r7, r8, lsl #16 ");
    asm(" mov r8, r8, lsr #16 ");
    asm(" orr r8, r8, r9, lsl #16 ");
    asm(" mov r9, r9, lsr #16 ");
    asm(" orr r9, r9, r10, lsl #16 ");
    asm(" mov r10, r10, lsr #16 ");
    asm(" orr r10, r10, r11, lsl #16 ");
    asm(" stmia r0!, {r3-r10} ");
    asm(" bne copy_fwd_nonaligned_2 ");
    asm(" b copy_fwd_remainder ");

    // source = 3 mod 4
    asm("copy_fwd_nonaligned_3: ");
    asm(" mov r3, r11, lsr #24 ");
    asm(" ldmia r1!, {r4-r11} ");
    PLD_ioff(1, 32);
    asm(" subs lr, lr, #1 ");
    asm(" orr r3, r3, r4, lsl #8 ");
    asm(" mov r4, r4, lsr #24 ");
    asm(" orr r4, r4, r5, lsl #8 ");
    asm(" mov r5, r5, lsr #24 ");
    asm(" orr r5, r5, r6, lsl #8 ");
    asm(" mov r6, r6, lsr #24 ");
    asm(" orr r6, r6, r7, lsl #8 ");
    asm(" mov r7, r7, lsr #24 ");
    asm(" orr r7, r7, r8, lsl #8 ");
    asm(" mov r8, r8, lsr #24 ");
    asm(" orr r8, r8, r9, lsl #8 ");
    asm(" mov r9, r9, lsr #24 ");
    asm(" orr r9, r9, r10, lsl #8 ");
    asm(" mov r10, r10, lsr #24 ");
    asm(" orr r10, r10, r11, lsl #8 ");
    asm(" stmia r0!, {r3-r10} ");
    asm(" bne copy_fwd_nonaligned_3 ");

    // <32 bytes to go, source alignment could be 1, 2 or 3 mod 4
    // r12 = 8 * (source mod 4)
    asm("copy_fwd_remainder: ");
    asm(" ands r4, r2, #0x1c ");        // r4 = 4*number of words left
    asm(" beq 2f ");                    // skip if none
    asm(" rsb r3, r12, #32 ");          // r3 = 32 - 8*source alignment

    asm("1: ");
    asm(" mov r5, r11, lsr r12 ");      // r5 = part of previous source word required to make destination word
    asm(" ldr r11, [r1], #4 ");         // get next word
    asm(" subs r4, r4, #4 ");           // 4 bytes less to do
    asm(" orr r5, r5, r11, lsl r3 ");   // form next destination word
    asm(" str r5, [r0], #4 ");          // and store it
    asm(" bne 1b ");                    // loop until all whole words are done

    asm("2: ");
    asm(" sub r1, r1, #4 ");
    asm(" add r1, r1, r12, lsr #3 ");   // r1 = real unaligned source address
    asm(" tst r2, #2 ");                // 2 bytes left?
    asm(" ldrneb r5, [r1], #1 ");       // copy 2
    asm(" strneb r5, [r0], #1 ");
    asm(" ldrneb r5, [r1], #1 ");
    asm(" strneb r5, [r0], #1 ");
    asm(" tst r2, #1 ");                // 1 byte left?
    asm(" ldrneb r5, [r1], #1 ");       // copy 1
    asm(" strneb r5, [r0], #1 ");
    __POPRET("r0,r4-r11,");


    //
    // Source is before destination and they overlap, so need to copy backwards
    //

    asm("copy_back:");
    asm(" add r0, r0, r2 ");            // r0=last dest address+1
    asm(" add r1, r1, r2 ");            // r1=last source address+1
    PLD_noff(1, 33);                    // preload last two cache lines
    PLD_noff(1, 1);

    asm(" movs r3, r0, lsl #30 ");      // check destination word aligned
    asm(" bne dest_unaligned_back ");

    asm("dest_aligned_back: ");
    asm(" ands r12, r1, #3 ");          // r12=alignment of source
    asm(" bne copy_back_nonaligned ");

    //
    // Backwards copying, addresses both word aligned, at least 13 bytes to go
    //

    asm("mem_move_back:");
    //
    // superalign
    //
    asm(" movs r4, r0, lsl #27 ");      // bytes required to align destination to 32
    asm(" beq bal_already_aligned ");   // skip if already aligned to 32
    asm(" cmp r2, r4, lsr #27 ");       // check that many remaining
    asm(" blo its_smaller_back ");      // if too short, just stick with word alignment
    asm(" msr cpsr_flg, r4 ");          // destination alignment into N, Z, C flags
    // do word moves to align destination
    asm(" ldrcs lr, [r1, #-4]! ");      // C flag == 1 word (we are already word aligned)
    asm(" ldmeqdb r1!, {r3,r9} ");      // Z flag == 2 words
    asm(" ldmmidb r1!, {r5-r8} ");
    asm(" sub r2, r2, r4, lsr #27 ");   // adjust length
    asm(" strcs lr, [r0, #-4]! ");      // destination now 8 byte aligned
    asm(" stmeqdb r0!, {r3,r9} ");      // destination now 16 byte aligned
    asm(" stmmidb r0!, {r5-r8} ");      // N flag == 4 words, destination now 32 byte aligned

    asm("bal_already_aligned: ");
    asm(" cmp r2, #64 ");
    asm(" bhs large_copy_back ");
    //
    // Less than 64 bytes to go
    //
    asm("its_smaller_back: ");
    asm(" movs ip, r2, lsl #26 ");      // length bits 5, 4, 3, 2 into N, Z, C, V
    asm(" beq mem_copy_end2 ");         // skip if remaining length zero
    asm(" msr cpsr_flg, ip ");
    asm(" ldmmidb r1!, {r3-r10} ");
    asm(" stmmidb r0!, {r3-r10} ");     // copy 32
    asm(" ldmeqdb r1!, {r3-r6} ");
    asm(" ldmcsdb r1!, {r7,r8} ");
    asm(" ldrvs r9, [r1, #-4]! ");
    asm(" stmeqdb r0!, {r3-r6} ");      // copy 16
    asm(" stmcsdb r0!, {r7,r8} ");      // copy 8
    asm(" strvs r9, [r0, #-4]! ");      // copy 4

    asm(" movs ip, r2, lsl #30 ");
    asm(" bne smallest_copy_back ");

    asm("mem_copy_end2: ");
    __POPRET("r0,r4-r11,");


    //
    // Less than 4 bytes to go...
    //

    asm("smallest_copy_back: ");
    asm(" msr cpsr_flg, ip ");
    asm(" ldrmih r3, [r1, #-2]! ");
    asm(" ldreqb r4, [r1, #-1]! ");
    asm(" strmih r3, [r0, #-2]! ");     // copy 2
    asm(" streqb r4, [r0, #-1]! ");     // copy 1
    __POPRET("r0,r4-r11,");


    //
    // Do byte moves if necessary to word-align destination
    //
    asm("dest_unaligned_back: ");
    asm(" msr cpsr_flg, r3 ");          // destination alignment in r3 into N,Z flags
    asm(" ldrmib r4, [r1, #-1]! ");     // do byte moves to align destination
    asm(" ldrmib r5, [r1, #-1]! ");
    asm(" ldreqb r6, [r1, #-1]! ");
    asm(" sub r2, r2, r3, lsr #30 ");   // adjust length, at least 13 bytes remaining
    asm(" strmib r4, [r0, #-1]! ");
    asm(" strmib r5, [r0, #-1]! ");
    asm(" streqb r6, [r0, #-1]! ");
    asm(" b dest_aligned_back ");


    //
    // Large backwards copy, length >= 64
    //

    asm("large_copy_back: ");
    asm(" movs ip, r2, lsr #6 ");
    asm("1: ");
    PLD_noff(1, 65);
    PLD_noff(1, 33);
    asm(" ldmdb r1!, {r3-r10} ");       // Copy 64
    asm(" stmdb r0!, {r3-r10} ");
    asm(" ldmdb r1!, {r3-r10} ");
    asm(" subs ip, ip, #1 ");
    asm(" stmdb r0!, {r3-r10} ");
    asm(" bne 1b ");
    asm(" and r2, r2, #63 ");
    asm(" b its_smaller_back ");

    //
    // Backwards unaligned copy
    //

    asm("copy_back_nonaligned: ");
    //
    // superalign
    //
    asm(" bic r1, r1, #3 ");            // align source
    asm(" ldr r3, [r1] ");              // get first word
    asm(" mov r12, r12, lsl #3 ");      // r12 = 8*source alignment
    asm(" ands r4, r0, #31 ");          // r4 = bytes to align dest to 32
    asm(" beq bunal_already_aligned "); // skip if already aligned
    asm(" cmp r2, r4 ");                // check if length big enough to align to 32
    asm(" blo copy_back_remainder ");   // skip if too small
    asm(" sub r2, r2, r4 ");            // adjust length
    asm(" rsb r6, r12, #32 ");          // r6 = 32 - 8*source alignment

    asm("1: ");
    asm(" mov r5, r3, lsl r6 ");        // r5 = part of previous source word required to make destination word
    asm(" ldr r3, [r1, #-4]! ");        // get next word
    asm(" subs r4, r4, #4 ");           // 4 bytes less to do
    asm(" orr r5, r5, r3, lsr r12 ");   // form next destination word
    asm(" str r5, [r0, #-4]! ");        // and store it
    asm(" bne 1b ");                    // loop until destination 32 byte aligned

    asm("bunal_already_aligned: ");     // destination now aligned to 32 bytes
    asm(" movs lr, r2, lsr #5 ");       // lr=number of 32-byte blocks
    asm(" beq copy_back_remainder ");   // skip if length < 32

    asm(" cmp r12, #16 ");
    asm(" beq copy_back_nonaligned_2 "); // branch if source = 2 mod 4
    asm(" bhi copy_back_nonaligned_3 "); // branch if source = 3 mod 4, else source = 1 mod 4

    // source = 1 mod 4
    asm("copy_back_nonaligned_1: ");
    asm(" mov r11, r3, lsl #24 ");
    asm(" ldmdb r1!, {r3-r10} ");
    PLD_noff(1, 64);
    asm(" orr r11, r11, r10, lsr #8 ");
    asm(" mov r10, r10, lsl #24 ");
    asm(" orr r10, r10, r9, lsr #8 ");
    asm(" mov r9, r9, lsl #24 ");
    asm(" orr r9, r9, r8, lsr #8 ");
    asm(" mov r8, r8, lsl #24 ");
    asm(" orr r8, r8, r7, lsr #8 ");
    asm(" mov r7, r7, lsl #24 ");
    asm(" orr r7, r7, r6, lsr #8 ");
    asm(" mov r6, r6, lsl #24 ");
    asm(" orr r6, r6, r5, lsr #8 ");
    asm(" mov r5, r5, lsl #24 ");
    asm(" orr r5, r5, r4, lsr #8 ");
    asm(" mov r4, r4, lsl #24 ");
    asm(" orr r4, r4, r3, lsr #8 ");
    asm(" stmdb r0!, {r4-r11} ");
    asm(" subs lr, lr, #1 ");
    asm(" bne copy_back_nonaligned_1 ");
    asm(" b copy_back_remainder ");

    // source = 2 mod 4
    asm("copy_back_nonaligned_2: ");
    asm(" mov r11, r3, lsl #16 ");
    asm(" ldmdb r1!, {r3-r10} ");
    PLD_noff(1, 64);
    asm(" orr r11, r11, r10, lsr #16 ");
    asm(" mov r10, r10, lsl #16 ");
    asm(" orr r10, r10, r9, lsr #16 ");
    asm(" mov r9, r9, lsl #16 ");
    asm(" orr r9, r9, r8, lsr #16 ");
    asm(" mov r8, r8, lsl #16 ");
    asm(" orr r8, r8, r7, lsr #16 ");
    asm(" mov r7, r7, lsl #16 ");
    asm(" orr r7, r7, r6, lsr #16 ");
    asm(" mov r6, r6, lsl #16 ");
    asm(" orr r6, r6, r5, lsr #16 ");
    asm(" mov r5, r5, lsl #16 ");
    asm(" orr r5, r5, r4, lsr #16 ");
    asm(" mov r4, r4, lsl #16 ");
    asm(" orr r4, r4, r3, lsr #16 ");
    asm(" stmdb r0!, {r4-r11} ");
    asm(" subs lr, lr, #1 ");
    asm(" bne copy_back_nonaligned_2 ");
    asm(" b copy_back_remainder ");

    // source = 3 mod 4
    asm("copy_back_nonaligned_3: ");
    asm(" mov r11, r3, lsl #8 ");
    asm(" ldmdb r1!, {r3-r10} ");
    PLD_noff(1, 64);
    asm(" orr r11, r11, r10, lsr #24 ");
    asm(" mov r10, r10, lsl #8 ");
    asm(" orr r10, r10, r9, lsr #24 ");
    asm(" mov r9, r9, lsl #8 ");
    asm(" orr r9, r9, r8, lsr #24 ");
    asm(" mov r8, r8, lsl #8 ");
    asm(" orr r8, r8, r7, lsr #24 ");
    asm(" mov r7, r7, lsl #8 ");
    asm(" orr r7, r7, r6, lsr #24 ");
    asm(" mov r6, r6, lsl #8 ");
    asm(" orr r6, r6, r5, lsr #24 ");
    asm(" mov r5, r5, lsl #8 ");
    asm(" orr r5, r5, r4, lsr #24 ");
    asm(" mov r4, r4, lsl #8 ");
    asm(" orr r4, r4, r3, lsr #24 ");
    asm(" stmdb r0!, {r4-r11} ");
    asm(" subs lr, lr, #1 ");
    asm(" bne copy_back_nonaligned_3 ");

    // <32 bytes to go, source alignment could be 1, 2 or 3 mod 4
    // r12 = 8 * (source mod 4)
    asm("copy_back_remainder: ");
    asm(" ands r4, r2, #0x1c ");        // r4 = 4*number of words left
    asm(" beq 2f ");                    // skip if none
    asm(" rsb r6, r12, #32 ");          // r6 = 32 - 8*source alignment

    asm("1: ");
    asm(" mov r5, r3, lsl r6 ");        // r5 = part of previous source word required to make destination word
    asm(" ldr r3, [r1, #-4]! ");        // get next word
    asm(" subs r4, r4, #4 ");           // 4 bytes less to do
    asm(" orr r5, r5, r3, lsr r12 ");   // form next destination word
    asm(" str r5, [r0, #-4]! ");        // and store it
    asm(" bne 1b ");                    // loop until all whole words are done

    asm("2: ");
    asm(" add r1, r1, r12, lsr #3 ");   // r1 = real unaligned source address
    asm(" tst r2, #2 ");                // 2 bytes left?
    asm(" ldrneb r3, [r1, #-1]! ");     // copy 2
    asm(" strneb r3, [r0, #-1]! ");
    asm(" ldrneb r3, [r1, #-1]! ");
    asm(" strneb r3, [r0, #-1]! ");
    asm(" tst r2, #1 ");                // 1 byte left?
    asm(" ldrneb r3, [r1, #-1]! ");     // copy 1
    asm(" strneb r3, [r0, #-1]! ");
    __POPRET("r0,r4-r11,");
    }

#endif // USE_REPLACEMENT_MEMCPY


#ifndef __KERNEL_MODE__
#ifdef __GCC32__
/**
Compares a block of data at one specified location with a block of data at
another specified location.

The comparison proceeds on a byte-for-byte basis; the result of the comparison
is based on the difference of the first pair of bytes to disagree.

The data at the two locations are equal if they have the same length and content.
Where the lengths are different and the shorter section of data is the same
as the first part of the longer section of data, the shorter is considered
to be less than the longer.

@param aLeft   A pointer to the first (or left) block of 8 bit data
               to be compared.
@param aLeftL  The length of the first (or left) block of data to be compared,
               i.e. the number of bytes.
@param aRight  A pointer to the second (or right) block of 8 bit data to be
               compared.
@param aRightL The length of the second (or right) block of data to be compared,
               i.e. the number of bytes.

@return Positive, if the first (or left) block of data is greater than the
        second (or right) block of data.
        Negative, if the first (or left) block of data is less than the
        second (or right) block of data.
        Zero, if both the first (or left) and second (or right) blocks of data
        have the same length and the same content.
*/
EXPORT_C __NAKED__ TInt Mem::Compare(const TUint8* /*aLeft*/, TInt /*aLeftL*/, const TUint8* /*aRight*/, TInt /*aRightL*/)
    {
    // fall through
    }
#endif
#endif
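// Mem::Compare() above falls through into the memcompare() implementation that
// follows, so both obey the rule documented in the comment: the first differing
// byte decides the result, otherwise the length difference does. A plain C
// statement of that rule (illustration only, not compiled; the helper name is
// hypothetical):
#if 0
static TInt CompareRule(const TUint8* aLeft, TInt aLeftL, const TUint8* aRight, TInt aRightL)
    {
    TInt n = aLeftL < aRightL ? aLeftL : aRightL;   // compare up to the shorter length
    for (TInt i = 0; i < n; ++i)
        {
        if (aLeft[i] != aRight[i])
            return aLeft[i] - aRight[i];            // first pair of bytes to disagree
        }
    return aLeftL - aRightL;                        // equal so far: lengths decide
    }
#endif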
|
923 |
||
924 |
||
925 |
||
926 |
// See header file e32cmn.h for the in-source documentation. |
|
927 |
extern "C" EXPORT_C __NAKED__ TInt memcompare(const TUint8* /*aLeft*/, TInt /*aLeftL*/, const TUint8* /*aRight*/, TInt /*aRightL*/) |
|
928 |
// |
|
929 |
// Compares until the smaller of the two lengths is reached. |
|
930 |
// If the lengths differ, returns leftlen-rightlen |
|
931 |
// If a difference is encountered, returns left byte-right byte |
|
932 |
// |
|
933 |
{ |
|
934 |
||
    asm(" stmfd sp!,{r4,r5,r6,lr}");
    asm(" mov r4,r0");
    //
    // Get the shorter of the two lengths, and check for zero length
    //
    asm(" cmp r1,r3");
    asm(" mov r6,r1");
    asm(" movge r6,r3");
    asm(" cmp r6,#0");
    asm(" beq compare_done");
    asm(" cmp r6,#16");
    //
    // Check for aligned buffers for faster comparing if more than 16 bytes
    //
    asm(" andge r0,r4,#3");
    asm(" andge r5,r2,#3");
    asm(" addlt r0,r5,#1");
    asm(" cmp r0,r5");
    asm(" beq aligned_compare");
    //
    // Get aLeft+Min(aLeftL,aRightL)
    //
    asm(" add r6,r4,r6");

    asm("compare_loop:");
    asm(" ldrb r0,[r4],#1");
    asm(" ldrb r5,[r2],#1");
    asm(" subs r0,r0,r5");
    asm(" bne compare_exit ");
    asm(" cmp r4,r6");
    asm(" beq compare_done");

    asm(" ldrb r0,[r4],#1");
    asm(" ldrb r5,[r2],#1");
    asm(" subs r0,r0,r5");
    asm(" bne compare_exit ");
    asm(" cmp r4,r6");
    asm(" beq compare_done");

    asm(" ldrb r0,[r4],#1");
    asm(" ldrb r5,[r2],#1");
    asm(" subs r0,r0,r5");
    asm(" bne compare_exit ");
    asm(" cmp r4,r6");
    asm(" beq compare_done");

    asm(" ldrb r0,[r4],#1");
    asm(" ldrb r5,[r2],#1");
    asm(" subs r0,r0,r5");
    asm(" bne compare_exit ");
    asm(" cmp r4,r6");
    asm(" bne compare_loop");
    //
    // Return difference of lengths
    //
    asm("compare_done:");
    asm(" sub r0,r1,r3");

    asm("compare_exit:");
    __POPRET("r4-r6,");
    //
    // Compare byte at a time until word aligned...
    //
    asm("aligned_compare:");
    //
    // Get number of bytes to compare before word alignment reached...and jump to appropriate point
    //
    asm(" mov ip,r6");
    asm(" add r6,r4,r6");
    asm(" subs r0,r0,#1");
    asm(" movmi r0,#3");
    asm(" rsb r5,r0,#3");
    asm(" sub ip,ip,r5");
    asm(" mov ip,ip,lsr #2");
    asm(" add pc,pc,r0,asl #4");
    asm(" b compare_done");             // Never executed
    //
    // Jump here if alignment is 1. Do not use more than 4 instructions without altering above relative jump
    //
    asm(" ldrb r0,[r4],#1");
    asm(" ldrb r5,[r2],#1");
    asm(" subs r0,r0,r5");
    asm(" bne compare_exit ");
    //
    // Jump here if alignment is 2. Do not use more than 4 instructions without altering above relative jump
    //
    asm(" ldrb r0,[r4],#1");
    asm(" ldrb r5,[r2],#1");
    asm(" subs r0,r0,r5");
    asm(" bne compare_exit ");
    //
    // Jump here if alignment is 3. Do not use more than 4 instructions without altering above relative jump
    //
    asm(" ldrb r0,[r4],#1");
    asm(" ldrb r5,[r2],#1");
    asm(" subs r0,r0,r5");
    asm(" bne compare_exit ");
    //
    // Must now be word aligned
    //
    asm("aligned_compare_loop:");
    asm(" ldr r0,[r4],#4");
    asm(" ldr r5,[r2],#4");
    asm(" eors r0,r0,r5");
    asm(" bne word_different");
    asm(" subs ip,ip,#1");
    asm(" bne aligned_compare_loop");
    //
    // Less than 4 bytes to go...
    //
    asm(" cmp r4,r6");
    asm(" bne compare_loop");
    asm(" sub r0,r1,r3");
    __POPRET("r4-r6,");
    //
    // A difference encountered while word comparing, find out which byte it was
    //
    asm("word_different:");
    asm(" ldrb r0,[r4,#-4]");
    asm(" ldrb r5,[r2,#-4]");
    asm(" subs r0,r0,r5");
    asm(" bne compare_exit ");
    asm(" ldrb r0,[r4,#-3]");
    asm(" ldrb r5,[r2,#-3]");
    asm(" subs r0,r0,r5");
    asm(" bne compare_exit ");
    asm(" ldrb r0,[r4,#-2]");
    asm(" ldrb r5,[r2,#-2]");
    asm(" subs r0,r0,r5");
    asm(" bne compare_exit ");
    //
    // This must be the different byte...
    //
    asm(" ldrb r0,[r4,#-1]");
    asm(" ldrb r5,[r2,#-1]");
    asm(" sub r0,r0,r5");
    __POPRET("r4-r6,");
    }
#endif