kernel/eka/include/x86hlp.inl
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\x86\x86hlp.inl
//
//

#ifdef __GCC32__
#include "x86hlp_gcc.inl"
#else

/**** MSVC helpers ****/

/*static void DivisionByZero()
	{
	_asm int 0;
	}*/

#pragma warning ( disable : 4414 )  // short jump to function converted to near

extern "C" {
__NAKED__ void _allmul()
//
// Multiply two 64 bit integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = arg 1
//		[esp+12], [esp+16] = arg 2
// Return result in edx:eax
// Remove arguments from stack
//
	{
	_asm mov eax, [esp+4]			// eax = low1
	_asm mul dword ptr [esp+16]		// edx:eax = low1*high2
	_asm mov ecx, eax				// keep low 32 bits of product
	_asm mov eax, [esp+8]			// eax = high1
	_asm mul dword ptr [esp+12]		// edx:eax = high1*low2
	_asm add ecx, eax				// accumulate low 32 bits of product
	_asm mov eax, [esp+4]			// eax = low1
	_asm mul dword ptr [esp+12]		// edx:eax = low1*low2
	_asm add edx, ecx				// add cross terms to high 32 bits
	_asm ret 16
	}
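
// For reference, a minimal C sketch of the cross-term scheme used above
// (illustrative only, not compiled here; the function name is hypothetical):
/*
static unsigned long long AllMulRef(unsigned long long a, unsigned long long b)
	{
	unsigned low1 = (unsigned)a, high1 = (unsigned)(a >> 32);
	unsigned low2 = (unsigned)b, high2 = (unsigned)(b >> 32);
	unsigned long long product = (unsigned long long)low1 * low2;			// low1*low2
	product += (unsigned long long)(low1 * high2 + high1 * low2) << 32;		// cross terms
	return product;		// low 64 bits of the full 128-bit product
	}
*/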
       

void udiv64_divby0()
	{
	_asm int 0						// division by zero exception
	_asm ret
	}

__NAKED__ void UDiv64()
	{
	// unsigned divide edx:eax by edi:esi
	// quotient in ebx:eax, remainder in edi:edx
	// ecx, ebp, esi also modified
	_asm test edi, edi
	_asm jnz short UDiv64a				// branch if divisor >= 2^32
	_asm test esi, esi
//	_ASM_j(z,DivisionByZero)			// if divisor=0, branch to error routine
	_asm jz udiv64_divby0
	_asm mov ebx, eax					// ebx=dividend low
	_asm mov eax, edx					// eax=dividend high
	_asm xor edx, edx					// edx=0
	_asm div esi						// quotient high now in eax
	_asm xchg eax, ebx					// quotient high in ebx, dividend low in eax
	_asm div esi						// quotient now in ebx:eax, remainder in edi:edx
	_asm ret
	UDiv64e:
	_asm xor eax, eax					// set result to 0xFFFFFFFF
	_asm dec eax
	_asm jmp short UDiv64f
	UDiv64a:
	_asm js short UDiv64b				// skip if divisor msb set
	_asm bsr ecx, edi					// ecx=bit number of divisor msb - 32
	_asm inc cl
	_asm push edi						// save divisor high
	_asm push esi						// save divisor low
	_asm shrd esi, edi, cl				// shift divisor right so that msb is bit 31
	_asm mov ebx, edx					// dividend into ebx:ebp
	_asm mov ebp, eax
	_asm shrd eax, edx, cl				// shift dividend right same number of bits
	_asm shr edx, cl
	_asm cmp edx, esi					// check if approx quotient will be 2^32
	_asm jae short UDiv64e				// if so, true result must be 0xFFFFFFFF
	_asm div esi						// approximate quotient now in eax
	UDiv64f:
	_asm mov ecx, eax					// into ecx
	_asm mul edi						// multiply approx. quotient by divisor high
	_asm mov esi, eax					// ls dword into esi, ms into edi
	_asm mov edi, edx
	_asm mov eax, ecx					// approx. quotient into eax
	_asm mul dword ptr [esp]			// multiply approx. quotient by divisor low
	_asm add edx, esi					// edi:edx:eax now equals approx. quotient * divisor
	_asm adc edi, 0
	_asm xor esi, esi
	_asm sub ebp, eax					// subtract dividend - approx. quotient * divisor
	_asm sbb ebx, edx
	_asm sbb esi, edi
	_asm jnc short UDiv64c				// if no borrow, result OK
	_asm dec ecx						// else result is one too big
	_asm add ebp, [esp]					// and add divisor to get correct remainder
	_asm adc ebx, [esp+4]
	UDiv64c:
	_asm mov eax, ecx					// result into ebx:eax, remainder into edi:edx
	_asm mov edi, ebx
	_asm mov edx, ebp
	_asm xor ebx, ebx
	_asm add esp, 8						// remove temporary values from stack
	_asm ret
	UDiv64b:
	_asm mov ebx, 1
	_asm sub eax, esi					// subtract divisor from dividend
	_asm sbb edx, edi
	_asm jnc short UDiv64d				// if no borrow, result=1, remainder in edx:eax
	_asm add eax, esi					// else add back
	_asm adc edx, edi
	_asm dec ebx						// and decrement quotient
	UDiv64d:
	_asm mov edi, edx					// remainder into edi:edx
	_asm mov edx, eax
	_asm mov eax, ebx					// result in ebx:eax
	_asm xor ebx, ebx
	_asm ret
	}
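
// Reference sketch of what UDiv64 computes (illustrative only, not compiled
// here; the name below is hypothetical). The routine avoids needing a 64-bit
// divide instruction: with a divisor below 2^32 it chains two 32-bit divides,
// otherwise it normalises the divisor, estimates a 32-bit quotient and
// corrects the estimate by at most one.
/*
static void UDiv64Ref(unsigned long long dividend, unsigned long long divisor,
					  unsigned long long* quotient, unsigned long long* remainder)
	{
	*quotient = dividend / divisor;		// UDiv64 returns this in ebx:eax
	*remainder = dividend % divisor;	// UDiv64 returns this in edi:edx
	}
*/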
       

__NAKED__ void _aulldvrm()
//
// Divide two 64 bit unsigned integers, returning a 64 bit result
// and a 64 bit remainder
//
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
//
// Return (dividend / divisor) in edx:eax
// Return (dividend % divisor) in ebx:ecx
//
// Remove arguments from stack
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm mov eax, [esp+16]
	_asm mov edx, [esp+20]
	_asm mov esi, [esp+24]
	_asm mov edi, [esp+28]
	_asm call UDiv64
	_asm mov ecx, edx
	_asm mov edx, ebx
	_asm mov ebx, edi
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16
	}
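
// Illustrative caller's view (a sketch, not part of this file): 64 bit
// division written in C is what these helpers exist to support, since 32-bit
// x86 has no single instruction for a 64-by-64-bit divide:
/*
	unsigned long long q = dividend / divisor;	// the compiler may lower this to a helper call
	unsigned long long r = dividend % divisor;	// likewise for the remainder
*/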
       

__NAKED__ void _alldvrm()
//
// Divide two 64 bit signed integers, returning a 64 bit result
// and a 64 bit remainder
//
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
//
// Return (dividend / divisor) in edx:eax
// Return (dividend % divisor) in ebx:ecx
//
// Remove arguments from stack
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm mov eax, [esp+16]
	_asm mov edx, [esp+20]
	_asm mov esi, [esp+24]
	_asm mov edi, [esp+28]
	_asm test edx, edx
	_asm jns dividend_nonnegative
	_asm neg edx
	_asm neg eax
	_asm sbb edx, 0
	dividend_nonnegative:
	_asm test edi, edi
	_asm jns divisor_nonnegative
	_asm neg edi
	_asm neg esi
	_asm sbb edi, 0
	divisor_nonnegative:
	_asm call UDiv64
	_asm mov ebp, [esp+20]
	_asm mov ecx, edx
	_asm xor ebp, [esp+28]
	_asm mov edx, ebx
	_asm mov ebx, edi
	_asm jns quotient_nonnegative
	_asm neg edx
	_asm neg eax
	_asm sbb edx, 0
	quotient_nonnegative:
	_asm cmp dword ptr [esp+20], 0
	_asm jns rem_nonnegative
	_asm neg ebx
	_asm neg ecx
	_asm sbb ebx, 0
	rem_nonnegative:
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16
	}
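
// The sign handling above follows C's truncating division: the quotient is
// negative exactly when the operand signs differ, and the remainder takes the
// sign of the dividend. A minimal sketch of the same rule (illustrative only;
// the name is hypothetical):
/*
static void AllDvrmRef(long long dividend, long long divisor,
					   long long* quotient, long long* remainder)
	{
	*quotient = dividend / divisor;		// sign is sign(dividend) XOR sign(divisor)
	*remainder = dividend % divisor;	// sign follows the dividend
	}
*/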
       

__NAKED__ void _aulldiv()
//
// Divide two 64 bit unsigned integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Remove arguments from stack
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]
	_asm mov edi, [esp+32]
	_asm call UDiv64
	_asm mov edx, ebx
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16
	}

__NAKED__ void _alldiv()
//
// Divide two 64 bit signed integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Remove arguments from stack
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]
	_asm mov edi, [esp+32]
	_asm test edx, edx
	_asm jns dividend_nonnegative
	_asm neg edx
	_asm neg eax
	_asm sbb edx, 0
	dividend_nonnegative:
	_asm test edi, edi
	_asm jns divisor_nonnegative
	_asm neg edi
	_asm neg esi
	_asm sbb edi, 0
	divisor_nonnegative:
	_asm call UDiv64
	_asm mov ecx, [esp+24]
	_asm mov edx, ebx
	_asm xor ecx, [esp+32]
	_asm jns quotient_nonnegative
	_asm neg edx
	_asm neg eax
	_asm sbb edx, 0
	quotient_nonnegative:
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16
	}

__NAKED__ void _aullrem()
//
// Divide two 64 bit unsigned integers and return 64 bit remainder
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Remove arguments from stack
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]
	_asm mov edi, [esp+32]
	_asm call UDiv64
	_asm mov eax, edx
	_asm mov edx, edi
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16
	}

__NAKED__ void _allrem()
//
// Divide two 64 bit signed integers and return 64 bit remainder
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Remove arguments from stack
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]
	_asm mov edi, [esp+32]
	_asm test edx, edx
	_asm jns dividend_nonnegative
	_asm neg edx
	_asm neg eax
	_asm sbb edx, 0
	dividend_nonnegative:
	_asm test edi, edi
	_asm jns divisor_nonnegative
	_asm neg edi
	_asm neg esi
	_asm sbb edi, 0
	divisor_nonnegative:
	_asm call UDiv64
	_asm mov eax, edx
	_asm mov edx, edi
	_asm cmp dword ptr [esp+24], 0
	_asm jns rem_nonnegative
	_asm neg edx
	_asm neg eax
	_asm sbb edx, 0
	rem_nonnegative:
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16
	}

__NAKED__ void _allshr()
//
// Arithmetic shift right EDX:EAX by CL
//
	{
	_asm cmp cl, 64
	_asm jae asr_count_ge_64
	_asm cmp cl, 32
	_asm jae asr_count_ge_32
	_asm shrd eax, edx, cl
	_asm sar edx, cl
	_asm ret
	asr_count_ge_32:
	_asm sub cl, 32
	_asm mov eax, edx
	_asm cdq
	_asm sar eax, cl
	_asm ret
	asr_count_ge_64:
	_asm sar edx, 31					// fill edx:eax with the sign bit (an immediate count of 32 would be masked to 0)
	_asm mov eax, edx
	_asm ret
	}
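
// A C sketch of the case split used by this shift helper (illustrative only;
// the name is hypothetical). Counts of 64 or more are undefined in C, so the
// helper pins them to a pure sign fill; _allshl and _aullshr below apply the
// same case split with a zero fill:
/*
static long long AllShrRef(long long value, unsigned char count)
	{
	if (count >= 64)
		return value < 0 ? -1 : 0;		// every bit becomes a copy of the sign bit
	return value >> count;				// 0..63: ordinary arithmetic shift
	}
*/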
       

__NAKED__ void _allshl()
//
// shift left EDX:EAX by CL
//
	{
	_asm cmp cl, 64
	_asm jae lsl_count_ge_64
	_asm cmp cl, 32
	_asm jae lsl_count_ge_32
	_asm shld edx, eax, cl
	_asm shl eax, cl
	_asm ret
	lsl_count_ge_32:
	_asm sub cl, 32
	_asm mov edx, eax
	_asm xor eax, eax
	_asm shl edx, cl
	_asm ret
	lsl_count_ge_64:
	_asm xor edx, edx
	_asm xor eax, eax
	_asm ret
	}

__NAKED__ void _aullshr()
//
// Logical shift right EDX:EAX by CL
//
	{
	_asm cmp cl, 64
	_asm jae lsr_count_ge_64
	_asm cmp cl, 32
	_asm jae lsr_count_ge_32
	_asm shrd eax, edx, cl
	_asm shr edx, cl
	_asm ret
	lsr_count_ge_32:
	_asm sub cl, 32
	_asm mov eax, edx
	_asm xor edx, edx
	_asm shr eax, cl
	_asm ret
	lsr_count_ge_64:
	_asm xor edx, edx
	_asm xor eax, eax
	_asm ret
	}


}


#endif