|
1 // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of the License "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: |
|
14 // e32\common\arm\carray.cia |
|
15 // Machine coded arrays for ARM |
|
16 // |
|
17 // |
|
18 |
|
19 #include <e32cia.h> |
|
20 #include "../common.h" |
|
21 |
|
22 #ifdef __ARRAY_MACHINE_CODED__ |
|
23 extern "C" void PanicBadArrayFindMode(); |
|
24 |
|
// Return a reference to the pointer at position anIndex.
// On entry: r0 = this, r1 = anIndex.
// Panics (PanicBadArrayIndex) if anIndex<0 or anIndex>=iCount.
EXPORT_C __NAKED__ TAny*& RPointerArrayBase::At(TInt /*anIndex*/) const
	{
	asm("ldmia r0, {r2,r3} ");			// r2=iCount, r3=iEntries
	asm("cmp r1, #0 ");					// check anIndex>=0
	asm("cmpge r2, r1 ");				// if so, check iCount>anIndex
	asm("addgt r0, r3, r1, lsl #2 ");	// address of entry = iEntries+4*anIndex
#ifdef __CPU_ARMV6
	asm("ble 1f ");						// avoid conditional return on ARMv6
	__JUMP(,lr);
#else
	__JUMP(gt,lr);						// return &iEntries[anIndex] if in range
#endif
	asm("1: ");
	asm("b " CSM_Z18PanicBadArrayIndexv);	// out of range -> panic, never returns
	}
|
40 |
|
// Append anEntry to the array, growing the allocation if it is full.
// On entry: r0 = this, r1 = anEntry.
// Returns KErrNone (0) on success, or the error code from Grow() on failure.
EXPORT_C __NAKED__ TInt RPointerArrayBase::Append(const TAny* /*anEntry*/)
	{
	asm("ldmia r0, {r2,r3,r12} ");		// r2=iCount, r3=iEntries, r12=iAllocated
	asm("cmp r2, r12 ");				// array full?
	asm("beq ptr_append_1 ");			// if so, grow it first
	asm("ptr_append_0: ");				// store path: slot guaranteed available
	asm("str r1, [r3, r2, lsl #2] ");	// iEntries[iCount] = anEntry
	asm("add r2, r2, #1 ");
	asm("str r2, [r0] ");				// iCount++
	asm("mov r0, #0 ");					// return KErrNone
	__JUMP(,lr);
	asm("ptr_append_1: ");				// grow path
	asm("stmfd sp!, {r0,r1,r2,lr} ");	// save this, anEntry, count, return address
	asm("bl " CSM_ZN17RPointerArrayBase4GrowEv);
	asm("cmp r0, #0 ");					// Grow() returned an error?
	asm("bne ptr_append_2 ");
	asm("ldmfd sp!, {r0,r1,r2,lr} ");	// restore this, anEntry (r2 reloaded below)
	asm("ldmia r0, {r2, r3} ");			// reload iCount, iEntries (Grow moved them)
	asm("b ptr_append_0 ");				// retry the store
	asm("ptr_append_2: ");				// error enlarging array
	asm("add sp, sp, #12 ");			// discard saved r0,r1,r2; r0 keeps error code
	__POPRET("");
	}
|
64 |
|
// Linear search for the first entry equal to anEntry (pointer compare).
// On entry: r0 = this, r1 = anEntry.
// Returns the index of the first match, or KErrNotFound (-1).
EXPORT_C __NAKED__ TInt RPointerArrayBase::Find(const TAny* /*anEntry*/) const
	{
	asm("ldmia r0, {r2,r3} ");		// r2=iCount, r3=iEntries
	asm("mov r0, #0 ");				// r0=0 (will be index+1)
	asm("subs r2, r2, #1 ");		// r2=iCount-1
	asm("blt 0f ");					// empty array -> no match (Z clear from subs)
	asm("1: ");
	asm("ldr r12, [r3], #4 ");		// r12=iEntries[r0], post-increment pointer
	asm("add r0, r0, #1 ");			// r0 = index+1
	asm("cmp r2, r0 ");				// C=1 iff iCount-1>=index+1 iff index<=iCount-2 iff this isn't last entry
	asm("teq r12, r1 ");			// check for equality, doesn't affect C
	asm("bhi 1b ");					// loop if C=1 & Z=0, i.e. if no match and this isn't last entry
	asm("0: ");
	asm("movne r0, #0 ");			// if no match set r0=0
	asm("sub r0, r0, #1 ");			// r0 was index+1, return index (or -1 = KErrNotFound)
	__JUMP(,lr);
	}
|
82 |
|
// Linear search using a caller-supplied identity relation.
// On entry: r0 = this, r1 = anEntry, r2 = anIdentity (function pointer,
// called as anIdentity(anEntry, entry) and expected to return nonzero on match).
// Returns the index of the first match, or KErrNotFound (-1).
EXPORT_C __NAKED__ TInt RPointerArrayBase::Find(const TAny* /*anEntry*/, TGeneralIdentityRelation /*anIdentity*/) const
	{
	asm("stmfd sp!, {r4-r8,lr} ");		// callee-saved regs used across the callback
	__EH_FRAME_PUSH2(r4-r8,lr)
	asm("ldmia r0, {r4,r5} ");			// r4=iCount, r5=iEntries
	asm("mvn r6, #0 ");					// r6 = -1 (current index, pre-incremented in loop)
	asm("mov r7, r1 ");					// r7=anEntry (r1 is clobbered by callback args)
	asm("mov r8, r2 ");					// r8=anIdentity
	asm("subs r4, r4, #1 ");			// r4=iCount-1
	asm("bmi ptr_find2_return ");		// if count=0, return -1
	asm("ptr_find2_loop: ");
	asm("ldr r1, [r5], #4 ");			// r1=pointer to entry r6, advance r5
	asm("add r6, r6, #1 ");
	asm("mov r0, r7 ");					// r0=anEntry
	__JUMPL(8);							// call anIdentity(anEntry, entry) via r8
	asm("cmp r0, #0 ");
	asm("bne ptr_find2_return ");		// if relation says equal, return r6
	asm("cmp r6, r4 ");
	asm("blt ptr_find2_loop ");			// more entries to test
	asm("mvn r6, #0 ");					// exhausted -> r6 = KErrNotFound
	asm("ptr_find2_return: ");			// return r6
	asm("mov r0, r6 ");
	__POPRET("r4-r8,");
	}
|
107 |
|
// Two-argument overload: binary search with find mode EArrayFindMode_Any.
// Sets r3 (aMode) to 0 and falls straight through into the three-argument
// overload below — the two functions must stay adjacent in this order.
EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearchSigned(TInt /*anEntry*/, TInt& /*anIndex*/) const
	{
	asm("mov r3, #0 ");
	// fall through
	}
|
113 |
|
// Binary search an array of signed integers for anEntry.
// On entry: r0 = this, r1 = anEntry, r2 = &anIndex, r3 = aMode.
// Returns KErrNone with anIndex = match index, or KErrNotFound with
// anIndex = insertion position.  Also the target of the fall-through from
// the two-argument overload above.
EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearchSigned(TInt /*anEntry*/, TInt& /*anIndex*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r4-r6,lr} ");
	__EH_FRAME_PUSH2(r4-r6,lr)
	asm("mov r6, r2 ");				// r6=&anIndex (r2 is reused for the count)
	asm("ldmia r0, {r2,r4} ");		// r2=count, r4=iEntries
	asm("bl BinarySearchSigned ");	// shared helper below
	asm("str r2, [r6] ");			// store index
	__POPRET("r4-r6,");

// Binary search list of signed integers
// Match value in r1
// List address in r4
// Count in r2
// Match mode in r3
// Return with:	r0=0 if match found, r0=-1 otherwise
//				Z flag set if match found, clear if not
//				r2=index of match or next higher
//				r5 modified
	asm("BinarySearchSigned: ");
#ifdef _DEBUG
	asm("cmp r3, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) );
	asm("bhs PanicBadArrayFindMode ");	// debug builds validate aMode
#endif
	asm("mov r3, r3, lsl #30 ");	// match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last)
	asm("orr r3, r3, #1 ");			// set NOT FOUND flag (bit 0 of r3)
	asm("cmp r2, #0 ");				// r2 will be right index
	asm("beq 0f ");					// empty list -> not found
	asm("mov r5, #0 ");				// r5 = left index
	asm("1: ");
	asm("add r12, r2, r5 ");
	asm("mov r12, r12, lsr #1 ");	// r12 = mid index
	asm("ldr r0, [r4, r12, lsl #2] ");	// r0 = entry[mid]
	asm("subs r0, r0, r1 ");		// r0 = entry[mid] - match
	asm("beq 2f ");					// if match branch out
	asm("3: ");
	asm("addlt r5, r12, #1 ");		// else if entry<match left=mid+1
	asm("movgt r2, r12 ");			// else if entry>match right=mid
	asm("subs r0, r2, r5 ");		// right > left ?
	asm("bgt 1b ");					// r0 = 0 when loop terminates
	asm("0: ");
	asm("tst r3, #1 ");				// test not found flag
	asm("mvnnes r0, #0 ");			// if set r0=-1 = KErrNotFound
	__JUMP(,lr);
	asm("2: ");						// exact match at index r12
	asm("bics r3, r3, #1 ");		// clear NOT FOUND flag, test for find mode ANY (Z set if so)
	asm("bne 3b ");					// if not, V=0 (left from subs), N=1 for last, 0 for first, Z=0 => LAST->LT FIRST->GT
	asm("mov r2, r12 ");			// if so, r2 = mid
	__JUMP(,lr);					// and return with r0 = 0
	}
|
164 |
|
// Two-argument overload: binary search with find mode EArrayFindMode_Any.
// Sets r3 (aMode) to 0 and falls straight through into the three-argument
// overload below — the two functions must stay adjacent in this order.
EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearchUnsigned(TUint /*anEntry*/, TInt& /*anIndex*/) const
	{
	asm("mov r3, #0 ");
	// fall through
	}
|
170 |
|
// Binary search an array of unsigned integers for anEntry.
// On entry: r0 = this, r1 = anEntry, r2 = &anIndex, r3 = aMode.
// Returns KErrNone with anIndex = match index, or KErrNotFound with
// anIndex = insertion position.  Also the fall-through target of the
// two-argument overload above.
EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearchUnsigned(TUint /*anEntry*/, TInt& /*anIndex*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r4-r6,lr} ");
	__EH_FRAME_PUSH2(r4-r6,lr)
	asm("mov r6, r2 ");				// r6=&anIndex (r2 is reused for the count)
	asm("ldmia r0, {r2,r4} ");		// r2=count, r4=iEntries
	asm("bl BinarySearchUnsigned ");	// shared helper below
	asm("str r2, [r6] ");			// store index
	__POPRET("r4-r6,");

// Binary search list of unsigned integers
// Match value in r1
// List address in r4
// Count in r2
// Match mode in r3
// Return with:	r0=0 if match found, r0=-1 otherwise
//				Z flag set if match found, clear if not
//				r2=index of match or next higher
//				r5 modified
	asm("BinarySearchUnsigned: ");
#ifdef _DEBUG
	asm("cmp r3, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) );
	asm("bhs PanicBadArrayFindMode ");	// debug builds validate aMode
#endif
	asm("mov r3, r3, lsl #30 ");	// match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last)
	asm("orr r3, r3, #1 ");			// set NOT FOUND flag (bit 0 of r3)
	asm("cmp r2, #0 ");				// r2 will be right index
	asm("beq 0f ");					// empty list -> not found
	asm("mov r5, #0 ");				// r5 = left index
	asm("1: ");
	asm("add r12, r2, r5 ");
	asm("mov r12, r12, lsr #1 ");	// r12 = mid index
	asm("ldr r0, [r4, r12, lsl #2] ");	// r0 = entry[mid]
	asm("subs r0, r1, r0 ");		// r0 = match - entry[mid] (unsigned compare via C flag)
	asm("beq 2f ");					// if match branch out
	asm("3: ");
	asm("addhi r5, r12, #1 ");		// else if entry<match left=mid+1	HI = C &~ Z
	asm("movlo r2, r12 ");			// else if entry>match right=mid	LO = ~C
	asm("subs r0, r2, r5 ");		// right > left ?
	asm("bgt 1b ");					// r0 = 0 when loop terminates
	asm("0: ");
	asm("tst r3, #1 ");				// test not found flag
	asm("mvnnes r0, #0 ");			// if set r0=-1 = KErrNotFound
	__JUMP(,lr);
	asm("2: ");						// N=0 Z=1 C=1 V=0 r0=0 here
	asm("bics r3, r3, #1 ");		// clear NOT FOUND flag, test for find mode ANY (Z set if so)
	asm("cmpne r3, #0x60000000 ");	// HI if LAST, LO if FIRST
	asm("bne 3b ");					// if not ANY, branch back to continue narrowing
	asm("mov r2, r12 ");			// if ANY, r2 = mid
	__JUMP(,lr);					// and return with r0 = 0
	}
|
222 |
|
// Binary search an array of pointers using a caller-supplied ordering.
// On entry: r0 = this, r1 = anEntry, r2 = &anIndex, r3 = anOrder,
// [sp, #0] = aMode (5th argument passed on the stack per AAPCS).
// Returns KErrNone with anIndex = match index, or KErrNotFound with
// anIndex = insertion position.
EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearch(const TAny* /*anEntry*/, TInt& /*anIndex*/, TGeneralLinearOrder /*anOrder*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r2,r4-r11,lr} ");	// store &anIndex, r4-r11, lr
	__EH_FRAME_ADDRESS(sp,4)
	__EH_FRAME_PUSH2(r4-r11,lr)
	asm("ldmia r0, {r5,r6} ");			// r5=count, r6=iEntries
	asm("ldr r11, [sp, #40] ");			// r11 = aMode (below the 10 regs just pushed)
	asm("mov r7, r3 ");					// r7=anOrder
	asm("mov r4, r1 ");					// r4=anEntry
	asm("bl BinarySearchPointers ");	// r0=KErrNone if match, KErrNotFound if not
	asm("ldmfd sp!, {r2,r4} ");			// r2=&anIndex, restore r4
	// Dont need to FRAME RESTORE here since there's no barrier here
	asm("str r5, [r2] ");				// store index
	__POPRET("r5-r11,");				// restore r5-r11 and return

// Binary search list of pointers
// Pointer to match value in r4
// List address in r6
// Count in r5
// Pointer to ordering function in r7
// r11 = find mode
// Return with:	r0=0 if match found, r0=-1 otherwise
//				Z flag set if match found, clear otherwise
//				r5=index of match or next higher
//				r9,r10,r11 modified
	asm("BinarySearchPointers: ");
#ifdef _DEBUG
	asm("cmp r11, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) );
	asm("bhs PanicBadArrayFindMode ");	// debug builds validate aMode
#endif
	asm("movs r11, r11, lsl #30 ");		// match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last)
	asm("eorne r11, r11, #0xC0000000 ");	// match mode -> bits 30,31 (00000000=any, 80000000=first, 40000000=last)
	asm("orr r11, r11, #1 ");			// set NOT FOUND flag (bit 0 of r11)
	asm("mov r9, lr ");					// save return address: lr is clobbered by callback
	asm("cmp r5, #0 ");					// r5 will be right index
	asm("beq 0f ");						// empty list -> not found
	asm("mov r10, #0 ");				// r10 = left index
	asm("1: ");
	asm("add r8, r5, r10 ");
	asm("mov r8, r8, lsr #1 ");			// r8 = mid index

	/** the latency of the indirect call should mask the latency of the ldr
		arm1136 requires base register to be valid one cycle early
	*/
	asm("mov r0, r4 ");					// r0 points to match value
	asm("ldr r1, [r6, r8, lsl #2] ");	// r1 points to entry[mid]
	__JUMPL(7);							// call ordering function (match, entry)
	asm("cmp r0, #0 ");
	asm("biceq r11, r11, #1 ");			// if match clear NOT FOUND flag
	asm("addeqs r0, r0, r11 ");			// and add match mode to r0 (>0 if LAST, <0 if FIRST, 0 if ANY)
	asm("beq 2f ");						// branch out if match and ANY
	asm("addgt r10, r8, #1 ");			// else if match > entry, left = mid + 1
	asm("movlt r5, r8 ");				// else if match < entry, right = mid
	asm("subs r0, r5, r10 ");			// loop if right > left
	asm("bgt 1b ");						// finish loop with r0 = 0
	asm("0: ");
	asm("tst r11, #1 ");				// test not found flag
	asm("mvnnes r0, #0 ");				// if set r0=-1 = KErrNotFound
	__JUMP(,r9);						// return via saved lr
	asm("2: ");
	asm("mov r5, r8 ");					// if ANY, r8 = mid
	__JUMP(,r9);						// and return with r0 = 0, Z=1
	}
|
286 |
|
// One-argument overload: in-sequence (binary) find with mode
// EArrayFindMode_Any.  Sets r2 (aMode) to 0 and falls straight through
// into the two-argument overload below — the functions must stay adjacent.
EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsqSigned(TInt /*anEntry*/) const
	{
	asm("mov r2, #0 ");
	// fall through
	}
|
292 |
|
// Binary search a sorted array of signed integers and return the index.
// On entry: r0 = this, r1 = anEntry, r2 = aMode.
// Returns the index of the match, or KErrNotFound.
// Also the fall-through target of the one-argument overload above.
EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsqSigned(TInt /*anEntry*/, TInt /*aMode*/) const
	{
#ifdef __EABI__
	// sp needs correct alignment (EABI requires 8-byte stack alignment,
	// so push an even number of registers)
	asm("stmfd sp!, {r4-r6,lr} ");
	__EH_FRAME_PUSH2(r4-r6,lr)
#else
	asm("stmfd sp!, {r4,r5,lr} ");
#endif
	asm("mov r3, r2 ");				// r3 = match mode
	asm("ldmia r0, {r2,r4} ");		// r2=count, r4=iEntries
	asm("bl BinarySearchSigned ");	// helper: Z set + r2=index on match
	asm("moveq r0, r2 ");			// if match r0=match index; if no match, r0=KErrNotFound
#ifdef __EABI__
	__POPRET("r4-r6,");
#else
	__POPRET("r4,r5,");
#endif
	}
|
312 |
|
// One-argument overload: in-sequence (binary) find with mode
// EArrayFindMode_Any.  Sets r2 (aMode) to 0 and falls straight through
// into the two-argument overload below — the functions must stay adjacent.
EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsqUnsigned(TUint /*anEntry*/) const
	{
	asm("mov r2, #0 ");
	// fall through
	}
|
318 |
|
// Binary search a sorted array of unsigned integers and return the index.
// On entry: r0 = this, r1 = anEntry, r2 = aMode.
// Returns the index of the match, or KErrNotFound.
// Also the fall-through target of the one-argument overload above.
EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsqUnsigned(TUint /*anEntry*/, TInt /*aMode*/) const
	{
#ifdef __EABI__
	// sp needs correct alignment (EABI requires 8-byte stack alignment,
	// so push an even number of registers)
	asm("stmfd sp!, {r4-r6,lr} ");
	__EH_FRAME_PUSH2(r4-r6,lr)
#else
	asm("stmfd sp!, {r4,r5,lr} ");
#endif
	asm("mov r3, r2 ");				// r3 = match mode
	asm("ldmia r0, {r2,r4} ");		// r2=count, r4=iEntries
	asm("bl BinarySearchUnsigned ");	// helper: Z set + r2=index on match
	asm("moveq r0, r2 ");			// if match r0=match index; if no match, r0=KErrNotFound
#ifdef __EABI__
	__POPRET("r4-r6,");
#else
	__POPRET("r4,r5,");
#endif
	}
|
338 |
|
// Two-argument overload: in-sequence (binary) find with mode
// EArrayFindMode_Any.  Sets r3 (aMode) to 0 and falls straight through
// into the three-argument overload below — the functions must stay adjacent.
EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsq(const TAny* /*anEntry*/, TGeneralLinearOrder /*anOrder*/) const
	{
	asm("mov r3, #0 ");
	// fall through
	}
|
344 |
|
// Binary search a sorted pointer array using a caller-supplied ordering and
// return the index.  On entry: r0 = this, r1 = anEntry, r2 = anOrder,
// r3 = aMode.  Returns the index of the match, or KErrNotFound.
// Also the fall-through target of the two-argument overload above.
EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsq(const TAny* /*anEntry*/, TGeneralLinearOrder /*anOrder*/, TInt /*aMode*/) const
	{

	asm("stmfd sp!, {r3-r11,lr} ");
	// NOTE(review): the unwind directive below only describes r4-r6,lr but the
	// push above saves r3-r11,lr (cf. HeapSort which pairs a matching
	// __EH_FRAME_ADDRESS/__EH_FRAME_PUSH2(r4-r11,lr)) — confirm against the
	// EH-frame macro definitions whether this mismatch is intentional.
	__EH_FRAME_PUSH2(r4-r6,lr)
	asm("ldmia r0, {r5,r6} ");			// r5=count, r6=iEntries
	asm("mov r11, r3 ");				// r11 = aMode
	asm("mov r7, r2 ");					// r7=anOrder
	asm("mov r4, r1 ");					// r1=anEntry
	asm("bl BinarySearchPointers ");	// Z set + r5=index on match
	asm("moveq r0, r5 ");				// if match, r0=match index
	__POPRET("r3-r11,");
	}
|
358 |
|
359 #ifndef __KERNEL_MODE__ |
|
// In-place heap sort of the array contents treated as signed integers.
// On entry: r0 = this.  No return value.
EXPORT_C __NAKED__ void RPointerArrayBase::HeapSortSigned()
	{
#ifdef __EABI__
	// push an even number of registers to keep sp 8-byte aligned
	asm("stmfd sp!, {r4-r10,lr} ");
	__EH_FRAME_PUSH2(r4-r10,lr)
#else
	asm("stmfd sp!, {r4-r9,lr} ");
#endif
	asm("ldmia r0, {r4,r5} ");		// r4=iCount, r5=iEntries
	asm("bl HeapSortSigned ");		// local asm helper below
#ifdef __EABI__
	__POPRET("r4-r10,");
#else
	__POPRET("r4-r9,");
#endif
// Heap sort list of signed integers
// List address in r5, count in r4
// r4=ss, r6=sh, r7=si
// r8,r9 modified
	asm("HeapSortSigned: ");
	asm("cmp r4, #1 ");
	__JUMP(le,lr);					// 0 or 1 elements: already sorted
	asm("mov r6, r4, lsr #1 ");		// start heapify from midpoint
	asm("hss_loop_start1: ");		// build-heap phase: sift down each node
	asm("sub r6, r6, #1 ");
	asm("ldr r7, [r5, r6, lsl #2] ");	// r7 = value being sifted
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("b hss_loop_start2 ");
	asm("hss_loop_inner: ");		// move larger child up into hole at r9
	asm("ldr r0, [r5, r8, lsl #2] ");
	asm("str r0, [r5, r9, lsl #2] ");
	asm("mov r9, r8 ");
	asm("hss_loop_start2: ");
	asm("add r8, r8, #1 ");
	asm("add r8, r8, r8 ");			// r8 = index of right child (2*(r8+1))
	asm("cmp r8, r4 ");
	asm("bgt hss_loop_inner_end ");	// past end of heap -> done sifting
	asm("add r0, r5, r8, lsl #2 ");
	asm("ldmneda r0, {r1,r2} ");	// both children exist: r1=left, r2=right
	asm("ldreq r1, [r0, #-4] ");	// only left child exists
	asm("subeq r8, r8, #1 ");
	asm("beq hss_loop_inner2 ");
	asm("cmp r1, r2 ");				// pick the larger child (signed)
	asm("subgt r8, r8, #1 ");
	asm("movle r1, r2 ");
	asm("hss_loop_inner2: ");
	asm("cmp r1, r7 ");				// larger child > sift value?
	asm("bgt hss_loop_inner ");
	asm("hss_loop_inner_end: ");
	asm("str r7, [r5, r9, lsl #2] ");	// drop sift value into final hole
	asm("cmp r6, #0 ");
	asm("bne hss_loop_start1 ");	// more nodes to heapify
	asm("sub r4, r4, #1 ");			// extract phase: shrink heap by one
	asm("ldr r7, [r5, r4, lsl #2] ");	// r7 = last element (to re-sift)
	asm("ldr r0, [r5, #0] ");
	asm("str r0, [r5, r4, lsl #2] ");	// move max (root) to sorted tail
	asm("cmp r4, #1 ");
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("bgt hss_loop_start2 ");	// sift the moved element back down
	asm("str r7, [r5, #0] ");		// final element in place
	__JUMP(,lr);
	}
|
424 |
|
// In-place heap sort of the array contents treated as unsigned integers.
// On entry: r0 = this.  Delegates to the local HeapSortUnsigned asm helper
// (defined in the free function below) with r4=iCount, r5=iEntries.
EXPORT_C __NAKED__ void RPointerArrayBase::HeapSortUnsigned()
	{
	asm("stmfd sp!, {r4-r9,lr} ");
	asm("ldmia r0, {r4,r5} ");		// r4=iCount, r5=iEntries
	asm("bl HeapSortUnsigned ");
	__POPRET("r4-r9,");
	}
|
432 #endif // !__KERNEL_MODE__ |
|
433 |
|
// Free-function heap sort of aCount unsigned integers at aEntries.
// On entry: r0 = aEntries, r1 = aCount.  Also defines the local
// HeapSortUnsigned asm helper used by RPointerArrayBase::HeapSortUnsigned.
__NAKED__ void HeapSortUnsigned(TUint* aEntries,TInt aCount)
	{
	asm("stmfd sp!, {r4-r9,lr} ");
	asm("mov r4,r1");				// r4=iCount
	asm("mov r5,r0");				// r5=iEntries
	asm("bl HeapSortUnsigned ");
	__POPRET("r4-r9,");

// Heap sort list of unsigned integers
// List address in r5, count in r4
// r4=ss, r6=sh, r7=si
// r8,r9 modified
	asm("HeapSortUnsigned: ");
	asm("cmp r4, #1 ");
	__JUMP(le,lr);					// 0 or 1 elements: already sorted
	asm("mov r6, r4, lsr #1 ");		// start heapify from midpoint
	asm("hsu_loop_start1: ");		// build-heap phase: sift down each node
	asm("sub r6, r6, #1 ");
	asm("ldr r7, [r5, r6, lsl #2] ");	// r7 = value being sifted
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("b hsu_loop_start2 ");
	asm("hsu_loop_inner: ");		// move larger child up into hole at r9
	asm("ldr r0, [r5, r8, lsl #2] ");
	asm("str r0, [r5, r9, lsl #2] ");
	asm("mov r9, r8 ");
	asm("hsu_loop_start2: ");
	asm("add r8, r8, #1 ");
	asm("add r8, r8, r8 ");			// r8 = index of right child (2*(r8+1))
	asm("cmp r8, r4 ");
	asm("bgt hsu_loop_inner_end ");	// past end of heap -> done sifting
	asm("add r0, r5, r8, lsl #2 ");
	asm("ldmneda r0, {r1,r2} ");	// both children exist: r1=left, r2=right
	asm("ldreq r1, [r0, #-4] ");	// only left child exists
	asm("subeq r8, r8, #1 ");
	asm("beq hsu_loop_inner2 ");
	asm("cmp r1, r2 ");				// pick the larger child (unsigned: HI/LS)
	asm("subhi r8, r8, #1 ");
	asm("movls r1, r2 ");
	asm("hsu_loop_inner2: ");
	asm("cmp r1, r7 ");				// larger child > sift value (unsigned)?
	asm("bhi hsu_loop_inner ");
	asm("hsu_loop_inner_end: ");
	asm("str r7, [r5, r9, lsl #2] ");	// drop sift value into final hole
	asm("cmp r6, #0 ");
	asm("bne hsu_loop_start1 ");	// more nodes to heapify
	asm("sub r4, r4, #1 ");			// extract phase: shrink heap by one
	asm("ldr r7, [r5, r4, lsl #2] ");	// r7 = last element (to re-sift)
	asm("ldr r0, [r5, #0] ");
	asm("str r0, [r5, r4, lsl #2] ");	// move max (root) to sorted tail
	asm("cmp r4, #1 ");
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("bgt hsu_loop_start2 ");	// sift the moved element back down
	asm("str r7, [r5, #0] ");		// final element in place
	__JUMP(,lr);
	}
|
491 |
|
492 #ifndef __KERNEL_MODE__ |
|
// In-place heap sort of the pointer array using a caller-supplied ordering.
// On entry: r0 = this, r1 = anOrder (called as anOrder(a, b)).
EXPORT_C __NAKED__ void RPointerArrayBase::HeapSort(TGeneralLinearOrder /*anOrder*/)
	{
	asm("stmfd sp!, {r3-r11,lr} ");
	// r3 is caller save
	__EH_FRAME_ADDRESS(sp,4)
	// we can push the callee save regs
	__EH_FRAME_PUSH2(r4-r11,lr)
	asm("ldmia r0, {r4,r5} ");		// r4=iCount, r5=iEntries
	asm("mov r10, r1 ");			// r10=anOrder
	asm("bl HeapSortPointers ");	// local asm helper below
	__POPRET("r3-r11,");

// Heap sort list of pointers
// List address in r5, count in r4, r10 points to ordering function
// r4=ss, r6=sh, r7=si
// r8,r9,r11 modified
	asm("HeapSortPointers: ");
	asm("cmp r4, #1 ");
	__JUMP(le,lr);					// 0 or 1 elements: already sorted
	asm("mov r11, lr ");			// save return address: lr clobbered by callback
	asm("mov r6, r4, lsr #1 ");		// start heapify from midpoint
	asm("hsp_loop_start1: ");		// build-heap phase: sift down each node
	asm("sub r6, r6, #1 ");
	asm("ldr r7, [r5, r6, lsl #2] ");	// r7 = pointer being sifted
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("b hsp_loop_start2 ");
	asm("hsp_loop_inner: ");		// move larger child up into hole at r9
	asm("ldr r0, [r5, r8, lsl #2] ");
	asm("str r0, [r5, r9, lsl #2] ");
	asm("mov r9, r8 ");
	asm("hsp_loop_start2: ");
	asm("add r8, r8, #1 ");
	asm("add r8, r8, r8 ");			// r8 = index of right child (2*(r8+1))
	asm("cmp r8, r4 ");
	asm("bgt hsp_loop_inner_end ");	// past end of heap -> done sifting
	asm("subeq r8, r8, #1 ");		// only left child exists
	asm("beq hsp_loop_inner2 ");
	asm("add r0, r5, r8, lsl #2 ");
	asm("ldmda r0, {r0,r1} ");		// r0=left child, r1=right child
	__JUMPL(10);					// call anOrder(left, right)
	asm("cmp r0, #0 ");
	asm("subgt r8, r8, #1 ");		// left > right: use left child
	asm("hsp_loop_inner2: ");
	asm("ldr r0, [r5, r8, lsl #2] ");
	asm("mov r1, r7 ");
	__JUMPL(10);					// call anOrder(child, sift value)
	asm("cmp r0, #0 ");
	asm("bgt hsp_loop_inner ");		// child > sift value: keep sifting
	asm("hsp_loop_inner_end: ");
	asm("str r7, [r5, r9, lsl #2] ");	// drop sift value into final hole
	asm("cmp r6, #0 ");
	asm("bne hsp_loop_start1 ");	// more nodes to heapify
	asm("sub r4, r4, #1 ");			// extract phase: shrink heap by one
	asm("ldr r7, [r5, r4, lsl #2] ");	// r7 = last element (to re-sift)
	asm("ldr r0, [r5, #0] ");
	asm("str r0, [r5, r4, lsl #2] ");	// move max (root) to sorted tail
	asm("cmp r4, #1 ");
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("bgt hsp_loop_start2 ");	// sift the moved element back down
	asm("str r7, [r5, #0] ");		// final element in place
	__JUMP(,r11);					// return via saved lr
	}
|
557 #endif // __KERNEL_MODE__ |
|
558 |
|
// Return a pointer to the entry at position anIndex (entries have arbitrary
// size iEntrySize).  On entry: r0 = this, r1 = anIndex.
// Panics (PanicBadArrayIndex) if anIndex<0 or anIndex>=iCount.
EXPORT_C __NAKED__ TAny* RArrayBase::At(TInt /*anIndex*/) const
	{
	asm("ldmia r0, {r2,r3,r12} ");	// r2=iCount, r3=iEntries, r12=iEntrySize
	asm("cmp r1, #0 ");				// check anIndex>=0
	asm("cmpge r2, r1 ");			// if so, check iCount>anIndex
	asm("mlagt r0, r1, r12, r3 ");	// if ok, r0=anIndex*iEntrySize+iEntries
#ifdef __CPU_ARMV6
	asm("ble 1f ");					// avoid conditional return on ARMv6
	__JUMP(,lr);
#else
	__JUMP(gt,lr);
#endif
	asm("1: ");
	asm("b " CSM_Z18PanicBadArrayIndexv);	// out of range -> panic, never returns
	}
|
574 |
|
// Append the iEntrySize-byte entry at anEntry to the array, growing the
// allocation if it is full.  On entry: r0 = this, r1 = anEntry.
// Returns KErrNone (0) on success, or the error code from Grow() on failure.
EXPORT_C __NAKED__ TInt RArrayBase::Append(const TAny* /*anEntry*/)
	{
	asm("stmfd sp!, {lr} ");
	asm("ldmia r0, {r3,r12} ");		// r3=iCount, r12=iEntries
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(RArrayBase,iAllocated));
	asm("cmp r3, r2 ");				// array full?
	asm("beq simple_append_1 ");	// if so, grow it first
	asm("simple_append_0: ");		// store path: slot guaranteed available
	asm("add r2, r3, #1 ");
	asm("str r2, [r0] ");			// iCount++
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(RArrayBase,iEntrySize));
	asm("mla r0, r2, r3, r12 ");	// r0=iEntries+iEntrySize*iCount (old count = slot)
	asm("bl wordmove ");			// r1=anEntry, r2=iEntrySize, do copy
	asm("mov r0, #0 ");				// return KErrNone;
	__POPRET("");

	asm("simple_append_1: ");		// grow path
	asm("stmfd sp!, {r0,r1,r2} ");	// save this, anEntry, iAllocated
	asm("bl " CSM_ZN10RArrayBase4GrowEv);
	asm("cmp r0, #0 ");				// Grow() returned an error?
	asm("bne simple_append_2 ");
	asm("ldmfd sp!, {r0,r1,r2} ");	// restore this, anEntry
	asm("ldmia r0, {r3, r12} ");	// reload iCount, iEntries (Grow moved them)
	asm("b simple_append_0 ");		// retry the store
	asm("simple_append_2: ");		// error enlarging array
	asm("add sp, sp, #12 ");		// discard saved r0,r1,r2; r0 keeps error code
	__POPRET("");
	}
|
603 |
|
// Linear search for the first entry whose key (32-bit word at iKeyOffset)
// equals the key of anEntry.  On entry: r0 = this, r1 = anEntry.
// Returns the index of the first match, or KErrNotFound (-1).
EXPORT_C __NAKED__ TInt RArrayBase::Find(const TAny* /*anEntry*/) const
	{
	asm("ldmia r0, {r0,r2,r3,r12} ");	// r0=count, r2=iEntries, r3=iEntrySize, r12=iKeyOffset
	asm("stmfd sp!, {r4,lr} ");			// save r4,lr
	asm("subs r0, r0, #1 ");			// r0 = count-1
	asm("blt 0f ");						// skip if count was zero
	asm("ldr r1, [r1, r12] ");			// r1=key of match entry
	asm("sub r4, r0, #1 ");				// r4 = iCount-2 (used to recover index on match)
	asm("1: ");
	asm("ldr lr, [r2, r12] ");			// lr=key of current entry
	asm("add r2, r2, r3 ");				// step r2 to next entry
	asm("subs r0, r0, #1 ");			// C=1 iff this isn't last entry
	asm("teq lr, r1 ");					// check for match - C unaffected
	asm("bhi 1b ");						// loop if C=1 & Z=0, i.e. if no match and this isn't last entry
	asm("0: ");
	asm("mvnne r0, #0 ");				// if no match, return -1
	asm("subeq r0, r4, r0 ");			// if match, index = (iCount-2)-r0
	__POPRET("r4,");
	}
|
623 |
|
// Linear search using a caller-supplied identity relation.
// On entry: r0 = this, r1 = anEntry, r2 = anIdentity (function pointer,
// called as anIdentity(anEntry, entry) and expected to return nonzero on match).
// Returns the index of the first match, or KErrNotFound (-1).
EXPORT_C __NAKED__ TInt RArrayBase::Find(const TAny* /*anEntry*/, TGeneralIdentityRelation /*anIdentity*/) const
	{
	asm("stmfd sp!, {r4-r10,lr} ");		// save r4-r10,lr
	__EH_FRAME_PUSH2(r4-r10,lr)
	asm("ldmia r0, {r4,r5,r6} ");		// r4=count, r5=iEntries, r6=iEntrySize
	asm("mov r8, r1 ");					// r8=anEntry (r1 clobbered by callback args)
	asm("mov r9, r2 ");					// r9=anIdentity
	asm("sub r7, r4, #1 ");				// r7=iCount-1 (used to recover index)
	asm("b simple_find2_start ");
	asm("simple_find2_loop: ");
	asm("mov r1, r5 ");					// r1->current entry
	asm("mov r0, r8 ");					// r0=anEntry
	__JUMPL(9);							// call anIdentity(anEntry, entry) via r9
	asm("cmp r0, #0 ");
	asm("bne simple_find2_return ");	// relation says equal -> compute index
	asm("add r5, r5, r6 ");				// else step to next entry
	asm("simple_find2_start: ");
	asm("subs r4, r4, #1 ");			// r4 counts down remaining entries
	asm("bpl simple_find2_loop ");
	asm("add r4, r7, #1 ");				// no match, arrange to return -1
	asm("simple_find2_return: ");
	asm("sub r0, r7, r4 ");				// index=count-r4
	__POPRET("r4-r10,");
	}
|
648 |
|
// Two-argument overload: binary search with find mode EArrayFindMode_Any.
// Sets r3 (aMode) to 0 and falls straight through into the three-argument
// overload below — the two functions must stay adjacent in this order.
EXPORT_C __NAKED__ TInt RArrayBase::BinarySearchSigned(const TAny* /*anEntry*/, TInt& /*anIndex*/) const
	{
	asm("mov r3, #0 ");
	// fall through
	}
|
654 |
|
// Binary search a sorted array of arbitrary-size entries keyed by a signed
// 32-bit word at iKeyOffset.  On entry: r0 = this, r1 = anEntry,
// r2 = &anIndex, r3 = aMode.  Returns KErrNone with anIndex = match index,
// or KErrNotFound with anIndex = insertion position.
// Also the fall-through target of the two-argument overload above.
EXPORT_C __NAKED__ TInt RArrayBase::BinarySearchSigned(const TAny* /*anEntry*/, TInt& /*anIndex*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r4-r8,lr} ");
	__EH_FRAME_PUSH2(r4-r8,lr)
	asm("mov r8, r2 ");					// r8=&anIndex
	asm("ldmia r0, {r2,r4,r5,r6} ");	// r2=count, r4=iEntries, r5=entry size, r6=key offset
	asm("cmp r5, #4 ");					// check for 4 byte entries
	asm("ldr r1, [r1, r6] ");			// r1=match key
	asm("beq 1f ");						// if 4 byte entries, call simpler routine
	asm("bl BinarySearchSignedKey ");	// if not, call general routine
	asm("b 2f ");
	asm("1: ");
	asm("bl BinarySearchSigned ");		// use simpler routine for 4 byte entries
	asm("2: ");
	asm("str r2, [r8] ");				// store index into anIndex
	__POPRET("r4-r8,");

// Binary search list of signed integers
// Match key in r1
// List address in r4
// Count in r2
// Match mode in r3
// EntrySize in r5, KeyOffset in r6
// Return with:	r0=0 if match found, r0 nonzero otherwise
//				r2=index of match or next higher
//				r7 modified
	asm("BinarySearchSignedKey: ");
#ifdef _DEBUG
	asm("cmp r3, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) );
	asm("bhs PanicBadArrayFindMode ");	// debug builds validate aMode
#endif
	asm("mov r3, r3, lsl #30 ");	// match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last)
	asm("orr r3, r3, #1 ");			// set NOT FOUND flag (bit 0 of r3)
	asm("cmp r2, #0 ");				// r2 will be right index
	asm("beq 0f ");					// empty list -> not found
	asm("mov r7, #0 ");				// r7 will be left index
	asm("1: ");
	asm("add r12, r2, r7 ");
	asm("mov r12, r12, lsr #1 ");	// r12 = mid index
	asm("mla r0, r12, r5, r6 ");	// r0 = key offset + entry size * mid index
	asm("ldr r0, [r4, r0] ");		// r0 = key[mid]
	asm("subs r0, r0, r1 ");		// r0 = entry[mid] - match
	asm("beq 2f ");					// if match branch out
	asm("3: ");
	asm("addlt r7, r12, #1 ");		// else if entry<match left=mid+1
	asm("movgt r2, r12 ");			// else if entry>match right=mid
	asm("subs r0, r2, r7 ");		// right > left ?
	asm("bgt 1b ");					// r0 = 0 when loop terminates
	asm("0: ");
	asm("tst r3, #1 ");				// test not found flag
	asm("mvnnes r0, #0 ");			// if set r0=-1 = KErrNotFound
	__JUMP(,lr);
	asm("2: ");						// exact match at index r12
	asm("bics r3, r3, #1 ");		// clear NOT FOUND flag, test for find mode ANY (Z set if so)
	asm("bne 3b ");					// if not, V=0 (left from subs), N=1 for last, 0 for first, Z=0 => LAST->LT FIRST->GT
	asm("mov r2, r12 ");			// if so, r2 = mid
	__JUMP(,lr);					// and return with r0 = 0
	}
|
713 |
|
// Two-argument overload: binary search with find mode EArrayFindMode_Any.
// Sets r3 (aMode) to 0 and falls straight through into the three-argument
// overload below — the two functions must stay adjacent in this order.
EXPORT_C __NAKED__ TInt RArrayBase::BinarySearchUnsigned(const TAny* /*anEntry*/, TInt& /*anIndex*/) const
	{
	asm("mov r3, #0 ");
	// fall through
	}
|
719 |
|
// Binary search a sorted array of arbitrary-size entries keyed by an
// unsigned 32-bit word at iKeyOffset.  On entry: r0 = this, r1 = anEntry,
// r2 = &anIndex, r3 = aMode.  Returns KErrNone with anIndex = match index,
// or KErrNotFound with anIndex = insertion position.
// Also the fall-through target of the two-argument overload above.
EXPORT_C __NAKED__ TInt RArrayBase::BinarySearchUnsigned(const TAny* /*anEntry*/, TInt& /*anIndex*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r4-r8,lr} ");
	__EH_FRAME_PUSH2(r4-r8,lr)
	asm("mov r8, r2 ");					// r8=&anIndex
	asm("ldmia r0, {r2,r4,r5,r6} ");	// r2=count, r4=iEntries, r5=entry size, r6=key offset
	asm("cmp r5, #4 ");					// check for 4 byte entries
	asm("ldr r1, [r1, r6] ");			// r1=match key
	asm("beq 1f ");						// if 4 byte entries, call simpler routine
	asm("bl BinarySearchUnsignedKey ");	// if not, call general routine
	asm("b 2f ");
	asm("1: ");
	asm("bl BinarySearchUnsigned ");	// use simpler routine for 4 byte entries
	asm("2: ");
	asm("str r2, [r8] ");				// store index into anIndex
	__POPRET("r4-r8,");

// Binary search list of unsigned integers
// Match key in r1
// List address in r4
// Count in r2
// Match mode in r3
// EntrySize in r5, KeyOffset in r6
// Return with:	r0=0 if match found, r0 nonzero otherwise
//				r2=index of match or next higher
//				r7 modified
	asm("BinarySearchUnsignedKey: ");
#ifdef _DEBUG
	asm("cmp r3, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) );
	asm("bhs PanicBadArrayFindMode ");	// debug builds validate aMode
#endif
	asm("mov r3, r3, lsl #30 ");	// match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last)
	asm("orr r3, r3, #1 ");			// set NOT FOUND flag (bit 0 of r3)
	asm("cmp r2, #0 ");				// r2 will be right index
	asm("beq 0f ");					// empty list -> not found
	asm("mov r7, #0 ");				// r7 will be left index
	asm("1: ");
	asm("add r12, r2, r7 ");
	asm("mov r12, r12, lsr #1 ");	// r12 = mid index
	asm("mla r0, r12, r5, r6 ");	// r0 = key offset + entry size * mid index
	asm("ldr r0, [r4, r0] ");		// r0 = key[mid]
	asm("subs r0, r1, r0 ");		// r0 = match - entry[mid] (unsigned compare via C flag)
	asm("beq 2f ");					// if match branch out
	asm("3: ");
	asm("addhi r7, r12, #1 ");		// else if entry<match left=mid+1	HI = C &~ Z
	asm("movlo r2, r12 ");			// else if entry>match right=mid	LO = ~C
	asm("subs r0, r2, r7 ");		// right > left ?
	asm("bgt 1b ");					// r0 = 0 when loop terminates
	asm("0: ");
	asm("tst r3, #1 ");				// test not found flag
	asm("mvnnes r0, #0 ");			// if set r0=-1 = KErrNotFound
	__JUMP(,lr);
	asm("2: ");						// N=0 Z=1 C=1 V=0 r0=0 here
	asm("bics r3, r3, #1 ");		// clear NOT FOUND flag, test for find mode ANY (Z set if so)
	asm("cmpne r3, #0x60000000 ");	// HI if LAST, LO if FIRST
	asm("bne 3b ");					// if not ANY, branch back to continue narrowing
	asm("mov r2, r12 ");			// if ANY, r2 = mid
	__JUMP(,lr);					// and return with r0 = 0
	}
|
779 |
|
// Binary search a sorted array of arbitrary-size entries using a
// caller-supplied ordering.  On entry: r0 = this, r1 = anEntry,
// r2 = &anIndex, r3 = anOrder, [sp, #0] = aMode (5th argument on the stack
// per AAPCS).  Returns KErrNone with anIndex = match index, or KErrNotFound
// with anIndex = insertion position.
EXPORT_C __NAKED__ TInt RArrayBase::BinarySearch(const TAny* /*anEntry*/, TInt& /*anIndex*/, TGeneralLinearOrder /*anOrder*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r3-r11,lr} ");
	// r3 is caller save
	__EH_FRAME_ADDRESS(sp,4)
	// we can push the callee save regs
	__EH_FRAME_PUSH2(r4-r11,lr)
	asm("ldmia r0, {r5,r6,r11} ");	// r5=count, r6=iEntries, r11=entry size
	asm("ldr r9, [sp, #40] ");		// r9 = aMode (below the 10 regs just pushed)
	asm("mov r4, r1 ");				// r4=anEntry
	asm("mov r7, r3 ");				// r7=anOrder
	asm("bl BinarySearchEntries ");	// helper preserves r2 = &anIndex
	asm("str r5, [r2] ");			// store index
	__POPRET("r3-r11,");

// Binary search list of general entries
// Pointer to match value in r4
// List address in r6
// Count in r5
// Match mode in r9
// Pointer to ordering function in r7
// Entry size in r11
// Return with:	r0=0 if match found, r0 nonzero otherwise
//				r5=index of match or next higher
//				r9,r10 modified
//				r2 preserved
	asm("BinarySearchEntries: ");
#ifdef _DEBUG
	asm("cmp r9, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) );
	asm("bhs PanicBadArrayFindMode ");	// debug builds validate aMode
#endif
	asm("stmfd sp!, {r2,lr} ");		// preserve r2 and lr across the callback
	asm("movs r9, r9, lsl #30 ");	// match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last)
	asm("eorne r9, r9, #0xC0000000 ");	// match mode -> bits 30,31 (00000000=any, 80000000=first, 40000000=last)
	asm("orr r9, r9, #1 ");			// set NOT FOUND flag (bit 0 of r9)
	asm("cmp r5, #0 ");				// r5 will be right index
	asm("beq 0f ");					// empty list -> not found
	asm("mov r10, #0 ");			// r10 will be left index
	asm("1: ");
	asm("add r8, r5, r10 ");
	asm("mov r8, r8, lsr #1 ");		// r8 = mid index
	asm("mla r1, r8, r11, r6 ");	// r1 = r8*entry size + list address = &entry[mid]
	asm("mov r0, r4 ");				// r0 points to match value
	__JUMPL(7);						// call ordering function (match, entry)
	asm("cmp r0, #0 ");
	asm("biceq r9, r9, #1 ");		// if match clear NOT FOUND flag
	asm("addeqs r0, r0, r9 ");		// and add match mode to r0 (>0 if LAST, <0 if FIRST, 0 if ANY)
	asm("beq 2f ");					// branch out if match and ANY
	asm("addgt r10, r8, #1 ");		// else if match > entry, left = mid + 1
	asm("movlt r5, r8 ");			// else if match < entry, right = mid
	asm("subs r0, r5, r10 ");		// loop if right > left
	asm("bgt 1b ");					// finish loop with r0 = 0
	asm("0: ");
	asm("tst r9, #1 ");				// test not found flag
	asm("mvnnes r0, #0 ");			// if set r0=-1 = KErrNotFound
	__POPRET("r2,");
	asm("2: ");
	asm("mov r5, r8 ");				// if ANY, r8 = mid
	__POPRET("r2,");				// and return with r0 = 0, Z=1
	}
|
840 |
|
EXPORT_C __NAKED__ TInt RArrayBase::FindIsqSigned(const TAny* /*anEntry*/) const
	{
	// Equivalent to FindIsqSigned(anEntry, EArrayFindMode_Any).
	asm("mov r2, #0 ");		// aMode = EArrayFindMode_Any
	// fall through to the two-argument overload, which must immediately follow
	}
|
846 |
|
EXPORT_C __NAKED__ TInt RArrayBase::FindIsqSigned(const TAny* /*anEntry*/, TInt /*aMode*/) const
	{
	// Binary search for an entry whose signed key equals that of anEntry.
	// On entry: r0=this, r1=anEntry, r2=aMode.
	// Returns the index of a match, or KErrNotFound.
#ifdef __EABI__
	// sp needs to be aligned correctly
	asm("stmfd sp!, {r4-r8,lr} ");
	__EH_FRAME_PUSH2(r4-r8,lr)
#else
	asm("stmfd sp!, {r4-r7,lr} ");
#endif
	asm("mov r3, r2 ");					// r3 = match mode
	asm("ldmia r0, {r2,r4,r5,r6} ");	// r2=count, r4=iEntries, r5=entry size, r6=key offset
	asm("cmp r5, #4 ");					// check for 4 byte entries
	asm("ldr r1, [r1, r6] ");			// r1=match key
	asm("beq 1f ");						// use simpler routine for 4 byte entries
	asm("bl BinarySearchSignedKey ");	// else call general routine (defined elsewhere in this file)
	asm("b 2f ");
	asm("1: ");
	asm("bl BinarySearchSigned ");
	asm("2: ");							// Z flag set by the search routine on a match
	asm("moveq r0, r2 ");				// if match r0=index else r0=KErrNotFound
#ifdef __EABI__
	__POPRET("r4-r8,");
#else
	__POPRET("r4-r7,");
#endif
	}
|
873 |
|
EXPORT_C __NAKED__ TInt RArrayBase::FindIsqUnsigned(const TAny* /*anEntry*/) const
	{
	// Equivalent to FindIsqUnsigned(anEntry, EArrayFindMode_Any).
	asm("mov r2, #0 ");		// aMode = EArrayFindMode_Any
	// fall through to the two-argument overload, which must immediately follow
	}
|
879 |
|
EXPORT_C __NAKED__ TInt RArrayBase::FindIsqUnsigned(const TAny* /*anEntry*/, TInt /*aMode*/) const
	{
	// Binary search for an entry whose unsigned key equals that of anEntry.
	// On entry: r0=this, r1=anEntry, r2=aMode.
	// Returns the index of a match, or KErrNotFound.
#ifdef __EABI__
	// sp needs to be aligned correctly
	asm("stmfd sp!, {r4-r8,lr} ");
	__EH_FRAME_PUSH2(r4-r8,lr)
#else
	asm("stmfd sp!, {r4-r7,lr} ");
#endif
	asm("mov r3, r2 ");					// r3 = match mode
	asm("ldmia r0, {r2,r4,r5,r6} ");	// r2=count, r4=iEntries, r5=entry size, r6=key offset
	asm("cmp r5, #4 ");					// check for 4 byte entries
	asm("ldr r1, [r1, r6] ");			// r1=match key
	asm("beq 1f ");						// use simpler routine for 4 byte entries
	asm("bl BinarySearchUnsignedKey ");	// else call general routine (defined elsewhere in this file)
	asm("b 2f ");
	asm("1: ");
	asm("bl BinarySearchUnsigned ");
	asm("2: ");							// Z flag set by the search routine on a match
	asm("moveq r0, r2 ");				// if match r0=index else r0=KErrNotFound
#ifdef __EABI__
	__POPRET("r4-r8,");
#else
	__POPRET("r4-r7,");
#endif
	}
|
906 |
|
EXPORT_C __NAKED__ TInt RArrayBase::FindIsq(const TAny* /*anEntry*/, TGeneralLinearOrder /*anOrder*/) const
	{
	// Equivalent to FindIsq(anEntry, anOrder, EArrayFindMode_Any).
	asm("mov r3, #0 ");		// aMode = EArrayFindMode_Any
	// fall through to the three-argument overload, which must immediately follow
	}
|
912 |
|
EXPORT_C __NAKED__ TInt RArrayBase::FindIsq(const TAny* /*anEntry*/, TGeneralLinearOrder /*anOrder*/, TInt /*aMode*/) const
	{
	// Binary search using a caller-supplied ordering function.
	// On entry: r0=this, r1=anEntry, r2=anOrder, r3=aMode.
	// Returns the index of a match, or KErrNotFound.
	asm("stmfd sp!, {r3-r11,lr} ");
	// r3 is caller save
	__EH_FRAME_ADDRESS(sp,4)
	// we can push the callee save regs
	__EH_FRAME_PUSH2(r4-r11,lr)
	asm("ldmia r0, {r5,r6,r11} ");		// r5=count, r6=iEntries, r11=entry size
	asm("mov r4, r1 ");					// r4=anEntry
	asm("mov r7, r2 ");					// r7=anOrder
	asm("mov r9, r3 ");					// r9 = aMode
	asm("bl BinarySearchEntries ");		// Z set on return if a match was found
	asm("moveq r0, r5 ");				// if match r0=index
	__POPRET("r3-r11,");
	}
|
928 |
|
929 #ifndef __KERNEL_MODE__ |
|
EXPORT_C __NAKED__ void RArrayBase::HeapSortSigned()
	{
	// Heap sort the array into ascending order of signed key.
#ifdef __EABI__
	// need sp aligned correctly
	asm("stmfd sp!, {r3-r11,lr} ");
	__EH_FRAME_ADDRESS(sp,4)
	__EH_FRAME_PUSH2(r4-r11,lr)
#else
	asm("stmfd sp!, {r4-r11,lr} ");
#endif
	asm("ldmia r0, {r4,r5,r10,r11} ");	// r4=iCount, r5=iEntries, r10=entry size, r11=key offset
	asm("cmp r10, #4 ");
	asm("bleq HeapSortSigned ");		// 4-byte entries: simple word routine (defined elsewhere in this file)
	asm("cmp r10, #4 ");				// re-test: flags clobbered if the call above was taken
	asm("blne HeapSortSignedKey ");		// general entries: keyed routine below
#ifdef __EABI__
	__POPRET("r3-r11,");
#else
	__POPRET("r4-r11,");
#endif

	// Heap sort list of entries by signed key
	// List address in r5, count in r4, entry size in r10, key offset in r11
	// r4=ss, r6=sh
	// r8,r9 modified
	asm("HeapSortSignedKey: ");
	asm("cmp r4, #1 ");
	__JUMP(le,lr);						// 0 or 1 entries: already sorted
	asm("mov r7, lr ");					// save lr in r7 (the wordmove calls below clobber lr)
	asm("sub sp, sp, r10 ");			// get some temporary space on the stack to hold one entry
	asm("mov r6, r4, lsr #1 ");			// r6 = first sift-down position = count/2
	asm("hssk_loop_start1: ");			// --- heap-building phase ---
	asm("sub r6, r6, #1 ");
	asm("mla r1, r6, r10, r5 ");		// [sp]=entry[r6]
	asm("mov r0, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");					// r9 = hole position during sift-down
	asm("b hssk_loop_start2 ");
	asm("hssk_loop_inner: ");
	asm("mla r0, r9, r10, r5 ");		// r0=&entry[r9]
	asm("mla r1, r8, r10, r5 ");		// r1=&entry[r8]
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r9]=entry[r8]
	asm("mov r9, r8 ");
	asm("hssk_loop_start2: ");
	asm("add r8, r8, #1 ");
	asm("add r8, r8, r8 ");				// r8 = 2*r8+2 = index of right child
	asm("cmp r8, r4 ");					// flags consumed by the conditional ops below
	asm("bgt hssk_loop_inner_end ");	// no children: sift-down finished
	asm("mla r0, r8, r10, r5 ");		// (mla leaves the flags from the cmp intact)
	asm("ldrne r2, [r0, r11]! ");		// two children (r8<count): r2=key[r8], r0=&key[r8]
	asm("addeq r0, r0, r11 ");			// one child (r8==count): r0=&key[r8]
	asm("ldr r1, [r0, -r10] ");			// r1=key[r8-1] (left child)
	asm("subeq r8, r8, #1 ");			// single child: consider entry[r8-1] only
	asm("beq hssk_loop_inner2 ");
	asm("cmp r1, r2 ");					// pick the child with the larger signed key
	asm("subgt r8, r8, #1 ");			// left child larger: use entry[r8-1]
	asm("movle r1, r2 ");				// else use right child's key
	asm("hssk_loop_inner2: ");
	asm("ldr r2, [sp, r11] ");			// r2=key[sp] (key of the entry being sifted)
	asm("cmp r1, r2 ");
	asm("bgt hssk_loop_inner ");		// child larger: move it up and descend
	asm("hssk_loop_inner_end: ");
	asm("mla r0, r9, r10, r5 ");		// r0=&entry[r9]
	asm("mov r1, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r9]=[sp]
	asm("cmp r6, #0 ");
	asm("bne hssk_loop_start1 ");		// more heap-building to do
	asm("sub r4, r4, #1 ");				// --- sorting phase: shrink heap by one ---
	asm("mla r1, r4, r10, r5 ");		// r1=&entry[r4]
	asm("mov r0, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// [sp]=entry[r4]
	asm("mla r0, r4, r10, r5 ");		// r0=&entry[r4]
	asm("mov r1, r5 ");					// r1=&entry[0]
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r4]=entry[0] (largest entry to its final slot)
	asm("cmp r4, #1 ");
	asm("mov r8, r6 ");					// r6==0 here: restart sift-down from the root
	asm("mov r9, r6 ");
	asm("bgt hssk_loop_start2 ");
	asm("mov r0, r5 ");					// r0=&entry[0]
	asm("mov r1, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[0]=[sp]
	asm("add sp, sp, r10 ");			// free temporary stack space
	__JUMP(,r7);						// return via lr saved in r7
	}
|
1021 |
|
EXPORT_C __NAKED__ void RArrayBase::HeapSortUnsigned()
	{
	// Heap sort the array into ascending order of unsigned key.
#ifdef __EABI__
	// need sp aligned correctly
	asm("stmfd sp!, {r3-r11,lr} ");
	__EH_FRAME_ADDRESS(sp,4)
	__EH_FRAME_PUSH2(r4-r11,lr)
#else
	asm("stmfd sp!, {r4-r11,lr} ");
#endif
	asm("ldmia r0, {r4,r5,r10,r11} ");	// r4=iCount, r5=iEntries, r10=entry size, r11=key offset
	asm("cmp r10, #4 ");
	asm("bleq HeapSortUnsigned ");		// 4-byte entries: simple word routine (defined elsewhere in this file)
	asm("cmp r10, #4 ");				// re-test: flags clobbered if the call above was taken
	asm("blne HeapSortUnsignedKey ");	// general entries: keyed routine below
#ifdef __EABI__
	__POPRET("r3-r11,");
#else
	__POPRET("r4-r11,");
#endif

	// Heap sort list of entries by unsigned key
	// List address in r5, count in r4, entry size in r10, key offset in r11
	// r4=ss, r6=sh
	// r8,r9 modified
	asm("HeapSortUnsignedKey: ");
	asm("cmp r4, #1 ");
	__JUMP(le,lr);						// 0 or 1 entries: already sorted
	asm("mov r7, lr ");					// save lr in r7 (the wordmove calls below clobber lr)
	asm("sub sp, sp, r10 ");			// get some temporary space on the stack to hold one entry
	asm("mov r6, r4, lsr #1 ");			// r6 = first sift-down position = count/2
	asm("hsuk_loop_start1: ");			// --- heap-building phase ---
	asm("sub r6, r6, #1 ");
	asm("mla r1, r6, r10, r5 ");		// [sp]=entry[r6]
	asm("mov r0, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");					// r9 = hole position during sift-down
	asm("b hsuk_loop_start2 ");
	asm("hsuk_loop_inner: ");
	asm("mla r0, r9, r10, r5 ");		// r0=&entry[r9]
	asm("mla r1, r8, r10, r5 ");		// r1=&entry[r8]
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r9]=entry[r8]
	asm("mov r9, r8 ");
	asm("hsuk_loop_start2: ");
	asm("add r8, r8, #1 ");
	asm("add r8, r8, r8 ");				// r8 = 2*r8+2 = index of right child
	asm("cmp r8, r4 ");					// flags consumed by the conditional ops below
	asm("bgt hsuk_loop_inner_end ");	// no children: sift-down finished
	asm("mla r0, r8, r10, r5 ");		// (mla leaves the flags from the cmp intact)
	asm("ldrne r2, [r0, r11]! ");		// two children (r8<count): r2=key[r8], r0=&key[r8]
	asm("addeq r0, r0, r11 ");			// one child (r8==count): r0=&key[r8]
	asm("ldr r1, [r0, -r10] ");			// r1=key[r8-1] (left child)
	asm("subeq r8, r8, #1 ");			// single child: consider entry[r8-1] only
	asm("beq hsuk_loop_inner2 ");
	asm("cmp r1, r2 ");					// pick the child with the larger unsigned key
	asm("subhi r8, r8, #1 ");			// left child larger: use entry[r8-1]
	asm("movls r1, r2 ");				// else use right child's key
	asm("hsuk_loop_inner2: ");
	asm("ldr r2, [sp, r11] ");			// r2=key[sp] (key of the entry being sifted)
	asm("cmp r1, r2 ");
	asm("bhi hsuk_loop_inner ");		// child larger (unsigned): move it up and descend
	asm("hsuk_loop_inner_end: ");
	asm("mla r0, r9, r10, r5 ");		// r0=&entry[r9]
	asm("mov r1, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r9]=[sp]
	asm("cmp r6, #0 ");
	asm("bne hsuk_loop_start1 ");		// more heap-building to do
	asm("sub r4, r4, #1 ");				// --- sorting phase: shrink heap by one ---
	asm("mla r1, r4, r10, r5 ");		// r1=&entry[r4]
	asm("mov r0, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// [sp]=entry[r4]
	asm("mla r0, r4, r10, r5 ");		// r0=&entry[r4]
	asm("mov r1, r5 ");					// r1=&entry[0]
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r4]=entry[0] (largest entry to its final slot)
	asm("cmp r4, #1 ");
	asm("mov r8, r6 ");					// r6==0 here: restart sift-down from the root
	asm("mov r9, r6 ");
	asm("bgt hsuk_loop_start2 ");
	asm("mov r0, r5 ");					// r0=&entry[0]
	asm("mov r1, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[0]=[sp]
	asm("add sp, sp, r10 ");			// free temporary stack space
	__JUMP(,r7);						// return via lr saved in r7
	}
|
1113 |
|
EXPORT_C __NAKED__ void RArrayBase::HeapSort(TGeneralLinearOrder anOrder)
	{
	// Heap sort the array using a caller-supplied ordering function.
	// On entry: r0=this, r1=anOrder.
#ifdef __EABI__
	// need sp aligned correctly
	asm("stmfd sp!, {r3-r11,lr} ");
	__EH_FRAME_ADDRESS(sp,4)
	__EH_FRAME_PUSH2(r4-r11,lr)
#else
	asm("stmfd sp!, {r4-r11,lr} ");
#endif
	asm("ldmia r0, {r4,r5,r10,r11} ");	// r4=iCount, r5=iEntries, r10=entry size, r11=key offset
	asm("mov r7, r1 ");					// r7=anOrder
	asm("bl HeapSortEntries ");
#ifdef __EABI__
	__POPRET("r3-r11,");
#else
	__POPRET("r4-r11,");
#endif

	// Heap sort list of entries
	// List address in r5, count in r4, entry size in r10, key offset in r11
	// Address of ordering function in r7
	// r4=ss, r6=sh
	// r8,r9 modified
	asm("HeapSortEntries: ");
	asm("cmp r4, #1 ");
	__JUMP(le,lr);						// 0 or 1 entries: already sorted
	asm("str lr, [sp, #-4]! ");			// save return address (ordering calls clobber lr)
	asm("mov r8, sp ");					// original SP
	asm("sub sp, sp, r10 ");			// get some temporary space on the stack to hold one entry
	asm("sub sp, sp, #4 ");				// make room for original SP
	asm("bic sp, sp, #4 ");				// align stack to 8 byte boundary
	asm("str r8, [sp, r10] ");			// save original SP just above the temporary entry
	asm("mov r6, r4, lsr #1 ");			// r6 = first sift-down position = count/2
	asm("hse_loop_start1: ");			// --- heap-building phase ---
	asm("sub r6, r6, #1 ");
	asm("mla r1, r6, r10, r5 ");		// [sp]=entry[r6]
	asm("mov r0, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");					// r9 = hole position during sift-down
	asm("b hse_loop_start2 ");
	asm("hse_loop_inner: ");
	asm("mla r0, r9, r10, r5 ");		// r0=&entry[r9]
	asm("mla r1, r8, r10, r5 ");		// r1=&entry[r8]
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r9]=entry[r8]
	asm("mov r9, r8 ");
	asm("hse_loop_start2: ");
	asm("add r8, r8, #1 ");
	asm("add r8, r8, r8 ");				// r8 = 2*r8+2 = index of right child
	asm("cmp r8, r4 ");
	asm("bgt hse_loop_inner_end ");		// no children: sift-down finished
	asm("subeq r8, r8, #1 ");			// single child (r8==count): consider entry[r8-1] only
	asm("beq hse_loop_inner2 ");
	asm("mla r1, r8, r10, r5 ");		// r1=&entry[r8]
	asm("sub r0, r1, r10 ");			// r0=&entry[r8-1]
	__JUMPL(7);							// call ordering function (entry[r8-1], entry[r8])
	asm("cmp r0, #0 ");					// compare entry[r8-1] with entry[r8]
	asm("subgt r8, r8, #1 ");			// left child larger: use entry[r8-1]
	asm("hse_loop_inner2: ");
	asm("mla r0, r8, r10, r5 ");		// r0=&entry[r8]
	asm("mov r1, sp ");
	__JUMPL(7);							// call ordering function (entry[r8], [sp])
	asm("cmp r0, #0 ");					// compare entry[r8] with [sp]
	asm("bgt hse_loop_inner ");			// child larger: move it up and descend
	asm("hse_loop_inner_end: ");
	asm("mla r0, r9, r10, r5 ");		// r0=&entry[r9]
	asm("mov r1, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r9]=[sp]
	asm("cmp r6, #0 ");
	asm("bne hse_loop_start1 ");		// more heap-building to do
	asm("sub r4, r4, #1 ");				// --- sorting phase: shrink heap by one ---
	asm("mla r1, r4, r10, r5 ");		// r1=&entry[r4]
	asm("mov r0, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// [sp]=entry[r4]
	asm("mla r0, r4, r10, r5 ");		// r0=&entry[r4]
	asm("mov r1, r5 ");					// r1=&entry[0]
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r4]=entry[0] (largest entry to its final slot)
	asm("cmp r4, #1 ");
	asm("mov r8, r6 ");					// r6==0 here: restart sift-down from the root
	asm("mov r9, r6 ");
	asm("bgt hse_loop_start2 ");
	asm("mov r0, r5 ");					// r0=&entry[0]
	asm("mov r1, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[0]=[sp]
	asm("ldr sp, [sp, r10] ");			// restore stack pointer, freeing temporary stack space
	__POPRET("");						// pop the saved lr and return
	}
|
1208 #endif // __KERNEL_MODE__ |
|
1209 #endif // __ARRAY_MACHINE_CODED__ |