|
1 /* |
|
2 * Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies). |
|
3 * All rights reserved. |
|
4 * This component and the accompanying materials are made available |
|
5 * under the terms of the License "Eclipse Public License v1.0" |
|
6 * which accompanies this distribution, and is available |
|
7 * at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
8 * |
|
9 * Initial Contributors: |
|
10 * Nokia Corporation - initial contribution. |
|
11 * |
|
12 * Contributors: |
|
13 * |
|
14 * Description: |
|
15 * e32\common\x86\x86hlp.inl |
|
16 * |
|
17 */ |
|
18 |
|
19 |
|
20 #ifdef __GCC32__ |
|
21 #include <x86hlp_gcc.inl> |
|
22 #else |
|
23 |
|
24 /**** MSVC helpers ****/ |
|
25 |
|
26 /*static void DivisionByZero() |
|
27 { |
|
28 _asm int 0; |
|
29 }*/ |
|
30 |
|
31 #pragma warning ( disable : 4414 ) // short jump to function converted to near |
|
32 |
|
33 extern "C" { |
|
__NAKED__ void _allmul()
//
// Multiply two 64 bit integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = arg 1 (low, high)
//		[esp+12], [esp+16] = arg 2 (low, high)
// Return result in edx:eax
// Remove arguments from stack (ret 16: callee pops all four dwords)
//
// MSVC compiler helper. Computes the product modulo 2^64:
//   (h1*2^32 + l1)*(h2*2^32 + l2) = (l1*h2 + h1*l2)*2^32 + l1*l2
// The h1*h2 term only affects bits >= 64 and is dropped.
//
	{
	_asm mov eax, [esp+4]			// eax = low1
	_asm mul dword ptr [esp+16]		// edx:eax = low1*high2
	_asm mov ecx, eax				// keep low 32 bits of product (cross term 1)
	_asm mov eax, [esp+8]			// eax = high1
	_asm mul dword ptr [esp+12]		// edx:eax = high1*low2
	_asm add ecx, eax				// accumulate low 32 bits of product (cross terms)
	_asm mov eax, [esp+4]			// eax = low1
	_asm mul dword ptr [esp+12]		// edx:eax = low1*low2
	_asm add edx, ecx				// add cross terms to high 32 bits
	_asm ret 16
	}
|
55 |
|
// Raise the x86 divide-error trap (vector 0) when UDiv64 detects a zero
// divisor, mimicking the fault a real DIV by zero would produce.
// NOTE(review): this is a plain (non-naked) function, so the compiler adds
// its own prologue/epilogue around the explicit _asm ret — presumably
// harmless since int 0 faults first; confirm against MSVC codegen.
void udiv64_divby0()
	{
	_asm int 0						// division by zero exception
	_asm ret
	}
|
61 |
|
__NAKED__ void UDiv64()
	{
	// unsigned divide edx:eax by edi:esi
	// quotient in ebx:eax, remainder in edi:edx
	// ecx, ebp, esi also modified
	//
	// Internal register-convention helper shared by the _a*div/_a*rem/_a*dvrm
	// routines below.  Three cases:
	//   divisor < 2^32          - two chained 32-bit DIVs
	//   2^32 <= divisor, msb clear - normalise: shift divisor so its msb is
	//                             bit 31, form an approximate 32-bit quotient,
	//                             then correct it downward by at most one
	//   divisor msb set         - quotient can only be 0 or 1: compare/subtract
	_asm test edi, edi
	_asm jnz short UDiv64a			// branch if divisor >= 2^32
	_asm test esi, esi
	_asm jz udiv64_divby0			// if divisor=0, branch to error routine
	_asm mov ebx, eax				// ebx=dividend low
	_asm mov eax, edx				// eax=dividend high
	_asm xor edx, edx				// edx=0
	_asm div esi					// quotient high now in eax
	_asm xchg eax, ebx				// quotient high in ebx, dividend low in eax
	_asm div esi					// quotient now in ebx:eax, remainder in edi:edx
	_asm ret
UDiv64e:
	_asm xor eax, eax				// set result to 0xFFFFFFFF
	_asm dec eax
	_asm jmp short UDiv64f
UDiv64a:
	_asm js short UDiv64b			// skip if divisor msb set
	_asm bsr ecx, edi				// ecx=bit number of divisor msb - 32
	_asm inc cl						// cl = shift count bringing divisor msb to bit 31
	_asm push edi					// save divisor high
	_asm push esi					// save divisor low
	_asm shrd esi, edi, cl			// shift divisor right so that msb is bit 31
	_asm mov ebx, edx				// dividend into ebx:ebp
	_asm mov ebp, eax
	_asm shrd eax, edx, cl			// shift dividend right same number of bits
	_asm shr edx, cl
	_asm cmp edx, esi				// check if approx quotient will be 2^32
	_asm jae short UDiv64e			// if so, true result must be 0xFFFFFFFF
	_asm div esi					// approximate quotient now in eax
UDiv64f:
	_asm mov ecx, eax				// into ecx
	_asm mul edi					// multiply approx. quotient by divisor high
	_asm mov esi, eax				// ls dword into esi, ms into edi
	_asm mov edi, edx
	_asm mov eax, ecx				// approx. quotient into eax
	_asm mul dword ptr [esp]		// multiply approx. quotient by divisor low (saved on stack)
	_asm add edx, esi				// edi:edx:eax now equals approx. quotient * divisor
	_asm adc edi, 0
	_asm xor esi, esi
	_asm sub ebp, eax				// subtract dividend - approx. quotient *divisor
	_asm sbb ebx, edx
	_asm sbb esi, edi
	_asm jnc short UDiv64c			// if no borrow, result OK
	_asm dec ecx					// else result is one too big
	_asm add ebp, [esp]				// and add divisor to get correct remainder
	_asm adc ebx, [esp+4]
UDiv64c:
	_asm mov eax, ecx				// result into ebx:eax, remainder into edi:edx
	_asm mov edi, ebx
	_asm mov edx, ebp
	_asm xor ebx, ebx
	_asm add esp, 8					// remove temporary values from stack
	_asm ret
UDiv64b:
	_asm mov ebx, 1
	_asm sub eax, esi				// subtract divisor from dividend
	_asm sbb edx, edi
	_asm jnc short UDiv64d			// if no borrow, result=1, remainder in edx:eax
	_asm add eax, esi				// else add back
	_asm adc edx, edi
	_asm dec ebx					// and decrement quotient
UDiv64d:
	_asm mov edi, edx				// remainder into edi:edx
	_asm mov edx, eax
	_asm mov eax, ebx				// result in ebx:eax
	_asm xor ebx, ebx
	_asm ret
	}
|
136 |
|
__NAKED__ void _aulldvrm()
//
// Divide two 64 bit unsigned integers, returning a 64 bit result
// and a 64 bit remainder
//
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
//
// Return (dividend / divisor) in edx:eax
// Return (dividend % divisor) in ebx:ecx
//
// Remove arguments from stack
//
// MSVC compiler helper: thin stack-to-register shim around UDiv64.
// Argument offsets below are +12 relative to the comment above because of
// the three saved registers.
//
	{
	_asm push ebp					// UDiv64 clobbers ebp/edi/esi; ebx is a result register here
	_asm push edi
	_asm push esi
	_asm mov eax, [esp+16]			// edx:eax = dividend
	_asm mov edx, [esp+20]
	_asm mov esi, [esp+24]			// edi:esi = divisor
	_asm mov edi, [esp+28]
	_asm call UDiv64				// quotient -> ebx:eax, remainder -> edi:edx
	_asm mov ecx, edx				// remainder low -> ecx
	_asm mov edx, ebx				// quotient high -> edx (quotient now edx:eax)
	_asm mov ebx, edi				// remainder high -> ebx (remainder now ebx:ecx)
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16						// callee pops the four dword arguments
	}
|
168 |
|
__NAKED__ void _alldvrm()
//
// Divide two 64 bit signed integers, returning a 64 bit result
// and a 64 bit remainder
//
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
//
// Return (dividend / divisor) in edx:eax
// Return (dividend % divisor) in ebx:ecx
//
// Remove arguments from stack
//
// MSVC compiler helper. Strategy: take absolute values, do an unsigned
// divide via UDiv64, then fix up signs (truncation toward zero):
//   sign(quotient)  = sign(dividend) XOR sign(divisor)
//   sign(remainder) = sign(dividend)
// Argument offsets below are +12 because of the three saved registers.
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm mov eax, [esp+16]			// edx:eax = dividend
	_asm mov edx, [esp+20]
	_asm mov esi, [esp+24]			// edi:esi = divisor
	_asm mov edi, [esp+28]
	_asm test edx, edx
	_asm jns dividend_nonnegative
	_asm neg edx					// negate 64-bit dividend (two's complement)
	_asm neg eax
	_asm sbb edx, 0
dividend_nonnegative:
	_asm test edi, edi
	_asm jns divisor_nonnegative
	_asm neg edi					// negate 64-bit divisor
	_asm neg esi
	_asm sbb edi, 0
divisor_nonnegative:
	_asm call UDiv64				// quotient -> ebx:eax, remainder -> edi:edx
	_asm mov ebp, [esp+20]			// ebp = original dividend high
	_asm mov ecx, edx				// remainder low -> ecx
	_asm xor ebp, [esp+28]			// sign flag of quotient; sets SF for jns below
	_asm mov edx, ebx				// quotient high -> edx
	_asm mov ebx, edi				// remainder high -> ebx
	_asm jns quotient_nonnegative	// tests SF from the xor above
	_asm neg edx					// negate 64-bit quotient
	_asm neg eax
	_asm sbb edx, 0
quotient_nonnegative:
	_asm cmp dword ptr [esp+20], 0	// remainder takes the dividend's sign
	_asm jns rem_nonnegative
	_asm neg ebx					// negate 64-bit remainder in ebx:ecx
	_asm neg ecx
	_asm sbb ebx, 0
rem_nonnegative:
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16						// callee pops the four dword arguments
	}
|
225 |
|
__NAKED__ void _aulldiv()
//
// Divide two 64 bit unsigned integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Remove arguments from stack
//
// MSVC compiler helper: stack-to-register shim around UDiv64, quotient only.
// Argument offsets below are +16 because of the four saved registers.
//
	{
	_asm push ebp					// UDiv64 clobbers ebp/edi/esi/ebx
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]			// edx:eax = dividend
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]			// edi:esi = divisor
	_asm mov edi, [esp+32]
	_asm call UDiv64				// quotient -> ebx:eax
	_asm mov edx, ebx				// quotient high -> edx (result now edx:eax)
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16						// callee pops the four dword arguments
	}
|
252 |
|
__NAKED__ void _alldiv()
//
// Divide two 64 bit signed integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Remove arguments from stack
//
// MSVC compiler helper. Takes absolute values, divides unsigned via UDiv64,
// then negates the quotient if dividend and divisor signs differ.
// Argument offsets below are +16 because of the four saved registers.
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]			// edx:eax = dividend
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]			// edi:esi = divisor
	_asm mov edi, [esp+32]
	_asm test edx, edx
	_asm jns dividend_nonnegative
	_asm neg edx					// negate 64-bit dividend
	_asm neg eax
	_asm sbb edx, 0
dividend_nonnegative:
	_asm test edi, edi
	_asm jns divisor_nonnegative
	_asm neg edi					// negate 64-bit divisor
	_asm neg esi
	_asm sbb edi, 0
divisor_nonnegative:
	_asm call UDiv64				// quotient -> ebx:eax
	_asm mov ecx, [esp+24]			// ecx = original dividend high
	_asm mov edx, ebx				// quotient high -> edx
	_asm xor ecx, [esp+32]			// quotient sign = dividend^divisor; sets SF for jns
	_asm jns quotient_nonnegative
	_asm neg edx					// negate 64-bit quotient
	_asm neg eax
	_asm sbb edx, 0
quotient_nonnegative:
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16						// callee pops the four dword arguments
	}
|
298 |
|
__NAKED__ void _aullrem()
//
// Divide two 64 bit unsigned integers and return 64 bit remainder
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result (the remainder) in edx:eax
// Remove arguments from stack
//
// MSVC compiler helper: stack-to-register shim around UDiv64, keeping only
// the remainder.  Argument offsets are +16 due to the four saved registers.
//
	{
	_asm push ebp					// UDiv64 clobbers ebp/edi/esi/ebx
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]			// edx:eax = dividend
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]			// edi:esi = divisor
	_asm mov edi, [esp+32]
	_asm call UDiv64				// remainder -> edi:edx
	_asm mov eax, edx				// remainder low -> eax
	_asm mov edx, edi				// remainder high -> edx (result now edx:eax)
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16						// callee pops the four dword arguments
	}
|
326 |
|
__NAKED__ void _allrem()
//
// Divide two 64 bit signed integers and return 64 bit remainder
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result (the remainder) in edx:eax
// Remove arguments from stack
//
// MSVC compiler helper. Takes absolute values, divides unsigned via UDiv64,
// then gives the remainder the sign of the dividend (C truncating division).
// Argument offsets below are +16 because of the four saved registers.
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]			// edx:eax = dividend
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]			// edi:esi = divisor
	_asm mov edi, [esp+32]
	_asm test edx, edx
	_asm jns dividend_nonnegative
	_asm neg edx					// negate 64-bit dividend
	_asm neg eax
	_asm sbb edx, 0
dividend_nonnegative:
	_asm test edi, edi
	_asm jns divisor_nonnegative
	_asm neg edi					// negate 64-bit divisor
	_asm neg esi
	_asm sbb edi, 0
divisor_nonnegative:
	_asm call UDiv64				// remainder -> edi:edx
	_asm mov eax, edx				// remainder low -> eax
	_asm mov edx, edi				// remainder high -> edx
	_asm cmp dword ptr [esp+24], 0	// remainder takes the original dividend's sign
	_asm jns rem_nonnegative
	_asm neg edx					// negate 64-bit remainder
	_asm neg eax
	_asm sbb edx, 0
rem_nonnegative:
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16						// callee pops the four dword arguments
	}
|
372 |
|
373 __NAKED__ void _allshr() |
|
374 // |
|
375 // Arithmetic shift right EDX:EAX by CL |
|
376 // |
|
377 { |
|
378 _asm cmp cl, 64 |
|
379 _asm jae asr_count_ge_64 |
|
380 _asm cmp cl, 32 |
|
381 _asm jae asr_count_ge_32 |
|
382 _asm shrd eax, edx, cl |
|
383 _asm sar edx, cl |
|
384 _asm ret |
|
385 asr_count_ge_32: |
|
386 _asm sub cl, 32 |
|
387 _asm mov eax, edx |
|
388 _asm cdq |
|
389 _asm sar eax, cl |
|
390 _asm ret |
|
391 asr_count_ge_64: |
|
392 _asm sar edx, 32 |
|
393 _asm mov eax, edx |
|
394 _asm ret |
|
395 } |
|
396 |
|
__NAKED__ void _allshl()
//
// shift left EDX:EAX by CL
//
// MSVC compiler helper: logical shift left of the 64-bit value in edx:eax.
// Counts 0-31, 32-63 and >=64 are handled separately because x86 shift
// instructions mask their count to 5 bits; counts >= 64 give zero.
//
	{
	_asm cmp cl, 64
	_asm jae lsl_count_ge_64
	_asm cmp cl, 32
	_asm jae lsl_count_ge_32
	_asm shld edx, eax, cl			// high dword filled from low dword
	_asm shl eax, cl
	_asm ret
lsl_count_ge_32:
	_asm sub cl, 32					// effective count for the low dword
	_asm mov edx, eax				// high result comes from old low dword
	_asm xor eax, eax				// low dword becomes zero
	_asm shl edx, cl
	_asm ret
lsl_count_ge_64:
	_asm xor edx, edx				// everything shifted out: result is 0
	_asm xor eax, eax
	_asm ret
	}
|
420 |
|
__NAKED__ void _aullshr()
//
// Logical shift right EDX:EAX by CL
//
// MSVC compiler helper: unsigned shift right of the 64-bit value in edx:eax.
// Counts 0-31, 32-63 and >=64 are handled separately because x86 shift
// instructions mask their count to 5 bits; counts >= 64 give zero.
//
	{
	_asm cmp cl, 64
	_asm jae lsr_count_ge_64
	_asm cmp cl, 32
	_asm jae lsr_count_ge_32
	_asm shrd eax, edx, cl			// low dword filled from high dword
	_asm shr edx, cl
	_asm ret
lsr_count_ge_32:
	_asm sub cl, 32					// effective count for the high dword
	_asm mov eax, edx				// low result comes from old high dword
	_asm xor edx, edx				// high dword becomes zero
	_asm shr eax, cl
	_asm ret
lsr_count_ge_64:
	_asm xor edx, edx				// everything shifted out: result is 0
	_asm xor eax, eax
	_asm ret
	}
|
444 |
|
445 |
|
446 } |
|
447 |
|
448 |
|
449 #endif |