|
1 /* |
|
2 * Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies). |
|
3 * All rights reserved. |
|
4 * This component and the accompanying materials are made available |
|
5 * under the terms of the License "Eclipse Public License v1.0" |
|
6 * which accompanies this distribution, and is available |
|
7 * at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
8 * |
|
9 * Initial Contributors: |
|
10 * Nokia Corporation - initial contribution. |
|
11 * |
|
12 * Contributors: |
|
13 * |
|
14 * Description: |
|
15 * e32\nklib\x86\vchelp.cpp |
|
16 * |
|
17 */ |
|
18 |
|
19 |
|
20 #ifndef __NAKED__ |
|
21 #define __NAKED__ __declspec(naked) |
|
22 #endif |
|
23 |
|
24 #include <e32def.h> |
|
25 |
|
26 #pragma warning ( disable : 4414 ) // short jump to function converted to near |
|
27 |
|
28 extern "C" { |
|
__NAKED__ void _allmul()
//
// Multiply two 64 bit integers returning a 64 bit result
// (the low 64 bits of the full 128-bit product; because only the low
// half is kept, the same routine serves signed and unsigned multiply).
// On entry:
//		[esp+4], [esp+8] = arg 1 (low dword, high dword)
//		[esp+12], [esp+16] = arg 2 (low dword, high dword)
// Return result in edx:eax
// Remove arguments from stack
//
{
	_asm mov eax, [esp+4]		// eax = low1
	_asm mul dword ptr [esp+16]	// edx:eax = low1*high2
	_asm mov ecx, eax		// keep low 32 bits of product (first cross term)
	_asm mov eax, [esp+8]		// eax = high1
	_asm mul dword ptr [esp+12]	// edx:eax = high1*low2
	_asm add ecx, eax		// accumulate low 32 bits of product (sum of cross terms)
	_asm mov eax, [esp+4]		// eax = low1
	_asm mul dword ptr [esp+12]	// edx:eax = low1*low2
	_asm add edx, ecx		// add cross terms to high 32 bits
	_asm ret 16			// callee pops the four dword arguments
}
|
50 |
|
// Reached (via jz from UDiv64) when the divisor is zero.
// 'int 0' raises the x86 divide-error exception (vector 0), i.e. the
// same fault a DIV instruction with a zero divisor would generate.
// NOTE(review): this function is deliberately not __NAKED__, so the
// compiler emits a prologue before the int; the trailing ret presumably
// never executes because the divide-error handler does not resume here
// — confirm against the kernel's exception handling.
void udiv64_divby0()
{
	_asm int 0			// division by zero exception
	_asm ret
}
|
56 |
|
__NAKED__ void UDiv64()
{
// Core 64-bit unsigned division helper shared by _aulldiv, _alldiv,
// _aullrem and _allrem.  Register-based calling convention:
// unsigned divide edx:eax by edi:esi
// quotient in ebx:eax, remainder in edi:edx
// ecx, ebp, esi also modified
	_asm test edi, edi
	_asm jnz short UDiv64a		// branch if divisor >= 2^32

	// Divisor fits in 32 bits: do two chained 32-bit divides
	// (high dword first, its remainder feeds the low divide).
	_asm test esi, esi
//	_ASM_j(z,DivisionByZero)	// if divisor=0, branch to error routine
	_asm jz udiv64_divby0
	_asm mov ebx, eax		// ebx=dividend low
	_asm mov eax, edx		// eax=dividend high
	_asm xor edx, edx		// edx=0
	_asm div esi			// quotient high now in eax
	_asm xchg eax, ebx		// quotient high in ebx, dividend low in eax
	_asm div esi			// quotient now in ebx:eax, remainder in edi:edx
	_asm ret
UDiv64e:
	_asm xor eax, eax		// set result to 0xFFFFFFFF
	_asm dec eax
	_asm jmp short UDiv64f
UDiv64a:
	// Divisor >= 2^32, so the quotient fits in a single dword.
	// Normalise divisor so its msb is bit 31, divide the similarly
	// shifted dividend to get an approximate quotient (exact or one
	// too big), then correct by back-multiplication.
	_asm js short UDiv64b		// skip if divisor msb set
	_asm bsr ecx, edi		// ecx=bit number of divisor msb - 32
	_asm inc cl			// cl = shift count needed to normalise (1..31)
	_asm push edi			// save divisor high
	_asm push esi			// save divisor low
	_asm shrd esi, edi, cl		// shift divisor right so that msb is bit 31
	_asm mov ebx, edx		// dividend into ebx:ebp
	_asm mov ebp, eax
	_asm shrd eax, edx, cl		// shift dividend right same number of bits
	_asm shr edx, cl
	_asm cmp edx, esi		// check if approx quotient will be 2^32
	_asm jae short UDiv64e		// if so, true result must be 0xFFFFFFFF
	_asm div esi			// approximate quotient now in eax
UDiv64f:
	_asm mov ecx, eax		// into ecx
	_asm mul edi			// multiply approx. quotient by divisor high
	_asm mov esi, eax		// ls dword into esi, ms into edi
	_asm mov edi, edx
	_asm mov eax, ecx		// approx. quotient into eax
	_asm mul dword ptr [esp]	// multiply approx. quotient by divisor low
	_asm add edx, esi		// edi:edx:eax now equals approx. quotient * divisor
	_asm adc edi, 0
	_asm xor esi, esi
	_asm sub ebp, eax		// subtract dividend - approx. quotient *divisor
	_asm sbb ebx, edx
	_asm sbb esi, edi
	_asm jnc short UDiv64c		// if no borrow, result OK
	_asm dec ecx			// else result is one too big
	_asm add ebp, [esp]		// and add divisor to get correct remainder
	_asm adc ebx, [esp+4]
UDiv64c:
	_asm mov eax, ecx		// result into ebx:eax, remainder into edi:edx
	_asm mov edi, ebx
	_asm mov edx, ebp
	_asm xor ebx, ebx
	_asm add esp, 8			// remove temporary values from stack
	_asm ret
UDiv64b:
	// Divisor has msb set: quotient can only be 0 or 1.
	_asm mov ebx, 1
	_asm sub eax, esi		// subtract divisor from dividend
	_asm sbb edx, edi
	_asm jnc short UDiv64d		// if no borrow, result=1, remainder in edx:eax
	_asm add eax, esi		// else add back
	_asm adc edx, edi
	_asm dec ebx			// and decrement quotient
UDiv64d:
	_asm mov edi, edx		// remainder into edi:edx
	_asm mov edx, eax
	_asm mov eax, ebx		// result in ebx:eax
	_asm xor ebx, ebx
	_asm ret
}
|
131 |
|
__NAKED__ void _aulldiv()
//
// Divide two 64 bit unsigned integers returning a 64 bit result.
// Thin wrapper: saves the callee-saved registers UDiv64 clobbers,
// loads the operands into UDiv64's register convention, and moves the
// quotient from ebx:eax into edx:eax.
// On entry:
//		[esp+4], [esp+8] = dividend (low dword, high dword)
//		[esp+12], [esp+16] = divisor (low dword, high dword)
// Return result in edx:eax
// Remove arguments from stack
//
{
	_asm push ebp			// save callee-saved registers UDiv64 modifies
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]		// edx:eax = dividend (offsets +16 for the 4 pushes)
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]		// edi:esi = divisor
	_asm mov edi, [esp+32]
	_asm call UDiv64		// quotient in ebx:eax
	_asm mov edx, ebx		// move quotient high into edx for return
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16			// callee pops the four dword arguments
}
|
158 |
|
__NAKED__ void _alldiv()
//
// Divide two 64 bit signed integers returning a 64 bit result.
// Implemented as unsigned division of the magnitudes via UDiv64, then
// the quotient is negated if the operand signs differ.
// On entry:
//		[esp+4], [esp+8] = dividend (low dword, high dword)
//		[esp+12], [esp+16] = divisor (low dword, high dword)
// Return result in edx:eax
// Remove arguments from stack
//
{
	_asm push ebp			// save callee-saved registers UDiv64 modifies
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]		// edx:eax = dividend
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]		// edi:esi = divisor
	_asm mov edi, [esp+32]
	_asm test edx, edx
	_asm jns dividend_nonnegative
	_asm neg edx			// 64-bit negate of edx:eax
	_asm neg eax
	_asm sbb edx, 0
dividend_nonnegative:
	_asm test edi, edi
	_asm jns divisor_nonnegative
	_asm neg edi			// 64-bit negate of edi:esi
	_asm neg esi
	_asm sbb edi, 0
divisor_nonnegative:
	_asm call UDiv64		// unsigned quotient in ebx:eax
	_asm mov ecx, [esp+24]		// ecx = original dividend high (still on stack)
	_asm mov edx, ebx		// quotient high into edx for return
	_asm xor ecx, [esp+32]		// sign bit of ecx = dividend sign XOR divisor sign
	_asm jns quotient_nonnegative
	_asm neg edx			// signs differed: negate the quotient
	_asm neg eax
	_asm sbb edx, 0
quotient_nonnegative:
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16			// callee pops the four dword arguments
}
|
204 |
|
__NAKED__ void _aullrem()
//
// Divide two 64 bit unsigned integers and return 64 bit remainder.
// Wrapper around UDiv64: the remainder comes back in edi:edx and is
// moved into edx:eax for return.
// On entry:
//		[esp+4], [esp+8] = dividend (low dword, high dword)
//		[esp+12], [esp+16] = divisor (low dword, high dword)
// Return result in edx:eax
// Remove arguments from stack
//
{
	_asm push ebp			// save callee-saved registers UDiv64 modifies
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]		// edx:eax = dividend
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]		// edi:esi = divisor
	_asm mov edi, [esp+32]
	_asm call UDiv64		// remainder in edi:edx
	_asm mov eax, edx		// remainder into edx:eax for return
	_asm mov edx, edi
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16			// callee pops the four dword arguments
}
|
232 |
|
__NAKED__ void _allrem()
//
// Divide two 64 bit signed integers and return 64 bit remainder.
// Unsigned division of the magnitudes via UDiv64; the remainder is
// then negated if the dividend was negative (C semantics: the
// remainder takes the sign of the dividend).
// On entry:
//		[esp+4], [esp+8] = dividend (low dword, high dword)
//		[esp+12], [esp+16] = divisor (low dword, high dword)
// Return result in edx:eax
// Remove arguments from stack
//
{
	_asm push ebp			// save callee-saved registers UDiv64 modifies
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]		// edx:eax = dividend
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]		// edi:esi = divisor
	_asm mov edi, [esp+32]
	_asm test edx, edx
	_asm jns dividend_nonnegative
	_asm neg edx			// 64-bit negate of edx:eax
	_asm neg eax
	_asm sbb edx, 0
dividend_nonnegative:
	_asm test edi, edi
	_asm jns divisor_nonnegative
	_asm neg edi			// 64-bit negate of edi:esi
	_asm neg esi
	_asm sbb edi, 0
divisor_nonnegative:
	_asm call UDiv64		// remainder in edi:edx
	_asm mov eax, edx		// remainder into edx:eax for return
	_asm mov edx, edi
	_asm cmp dword ptr [esp+24], 0	// original dividend high: was the dividend negative?
	_asm jns rem_nonnegative
	_asm neg edx			// negative dividend: negate the remainder
	_asm neg eax
	_asm sbb edx, 0
rem_nonnegative:
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16			// callee pops the four dword arguments
}
|
278 |
|
279 __NAKED__ void _allshr() |
|
280 // |
|
281 // Arithmetic shift right EDX:EAX by ECX |
|
282 // |
|
283 { |
|
284 _asm cmp ecx, 64 |
|
285 _asm jae asr_count_ge_64 |
|
286 _asm cmp cl, 32 |
|
287 _asm jae asr_count_ge_32 |
|
288 _asm shrd eax, edx, cl |
|
289 _asm sar edx, cl |
|
290 _asm ret |
|
291 asr_count_ge_32: |
|
292 _asm sub cl, 32 |
|
293 _asm mov eax, edx |
|
294 _asm cdq |
|
295 _asm sar eax, cl |
|
296 _asm ret |
|
297 asr_count_ge_64: |
|
298 _asm sar edx, 32 |
|
299 _asm mov eax, edx |
|
300 _asm ret |
|
301 } |
|
302 |
|
__NAKED__ void _allshl()
//
// Shift left EDX:EAX by ECX.
// Counts >= 64 yield zero; counts 32..63 move the low dword into the
// high dword before shifting.
//
{
	_asm cmp ecx, 64
	_asm jae lsl_count_ge_64
	_asm cmp cl, 32
	_asm jae lsl_count_ge_32
	_asm shld edx, eax, cl		// count 0..31: normal double shift
	_asm shl eax, cl
	_asm ret
lsl_count_ge_32:
	_asm sub cl, 32			// count 32..63: high dword = low << (count-32)
	_asm mov edx, eax
	_asm xor eax, eax		// low dword becomes zero
	_asm shl edx, cl
	_asm ret
lsl_count_ge_64:
	_asm xor edx, edx		// count >= 64: result is zero
	_asm xor eax, eax
	_asm ret
}
|
326 |
|
__NAKED__ void _aullshr()
//
// Logical shift right EDX:EAX by ECX.
// Counts >= 64 yield zero; counts 32..63 move the high dword into the
// low dword before shifting.
//
{
	_asm cmp ecx, 64
	_asm jae lsr_count_ge_64
	_asm cmp cl, 32
	_asm jae lsr_count_ge_32
	_asm shrd eax, edx, cl		// count 0..31: normal double shift
	_asm shr edx, cl
	_asm ret
lsr_count_ge_32:
	_asm sub cl, 32			// count 32..63: low dword = high >> (count-32)
	_asm mov eax, edx
	_asm xor edx, edx		// high dword becomes zero
	_asm shr eax, cl
	_asm ret
lsr_count_ge_64:
	_asm xor edx, edx		// count >= 64: result is zero
	_asm xor eax, eax
	_asm ret
}
|
350 } |
|
351 |