kernel/eka/include/x86hlp_gcc.inl

// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\x86\x86hlp_gcc.inl
// If there are no exports then GCC 3.4.x does not generate a .reloc
// section, without which rombuild can't relocate the .code section
// to its ROM address. Your ROM then goes boom early in the boot sequence.
// This unused export forces the PE to be generated with a .reloc section.
//
//

EXPORT_C void __ignore_this_export()
	{
	}

static void DivisionByZero()
	{
	asm("int 0");
	}

extern "C" {

void __NAKED__ _alloca()
{
	// GCC passes the param in eax and expects no return value
	asm("pop ecx");
	asm("sub esp, eax");
	asm("push ecx");
	asm("ret");
}
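
// Calling-convention sketch (illustrative, not from the original source): the
// allocation size arrives in eax rather than on the stack, and on return the
// caller's esp has simply been lowered by that many bytes, so the freshly
// reserved block sits at the new top of stack. This is the helper behind
// dynamic stack allocation such as:
//
//	void Fill(int aLen)			// hypothetical example function
//		{
//		char* buf = (char*)__builtin_alloca(aLen);	// may compile to a call to
//		buf[0] = 0;									// _alloca with aLen in eax
//		}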
       
void __NAKED__ _allmul()
//
// Multiply two 64 bit integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = arg 1
//		[esp+12], [esp+16] = arg 2
// Return result in edx:eax
// The caller removes the arguments from the stack
//
	{
	asm("mov eax, [esp+4]");		// eax = low1
	asm("mul dword ptr [esp+16]");	// edx:eax = low1*high2
	asm("mov ecx, eax");			// keep low 32 bits of product
	asm("mov eax, [esp+8]");		// eax = high1
	asm("mul dword ptr [esp+12]");	// edx:eax = high1*low2
	asm("add ecx, eax");			// accumulate low 32 bits of product
	asm("mov eax, [esp+4]");		// eax = low1
	asm("mul dword ptr [esp+12]");	// edx:eax = low1*low2
	asm("add edx, ecx");			// add cross terms to high 32 bits
	asm("ret");
	}
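
// Worked decomposition (illustrative, not from the original source): writing
// each operand as 2^32*high + low, the low 64 bits of the product are
//
//	a*b mod 2^64 = low1*low2 + 2^32*(low1*high2 + high1*low2)   (mod 2^64)
//
// so the high1*high2 term can be ignored entirely; the routine above computes
// the two cross terms first, keeps only their low 32 bits in ecx, and finally
// adds them into the high half of the full 64-bit low1*low2 product.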
       
void __NAKED__ udiv64_divby0()
	{
	asm("int 0");					// division by zero exception
	asm("ret");
	}

__NAKED__ /*LOCAL_C*/ void UDiv64()
	{
	// unsigned divide edx:eax by edi:esi
	// quotient in ebx:eax, remainder in edi:edx
	// ecx, ebp, esi also modified
	asm("test edi, edi");
	asm("jnz short UDiv64a");			// branch if divisor >= 2^32
	asm("test esi, esi");
	asm("jz %a0": : "i"(&DivisionByZero)); // if divisor=0, branch to error routine
	asm("mov ebx, eax");				// ebx=dividend low
	asm("mov eax, edx");				// eax=dividend high
	asm("xor edx, edx");				// edx=0
	asm("div esi");						// quotient high now in eax
	asm("xchg eax, ebx");				// quotient high in ebx, dividend low in eax
	asm("div esi");						// quotient now in ebx:eax, remainder in edi:edx
	asm("ret");
	asm("UDiv64e:");
	asm("xor eax, eax");				// set result to 0xFFFFFFFF
	asm("dec eax");
	asm("jmp short UDiv64f");
	asm("UDiv64a:");
	asm("js short UDiv64b");			// skip if divisor msb set
	asm("bsr ecx, edi");				// ecx=bit number of divisor msb - 32
	asm("inc cl");
	asm("push edi");					// save divisor high
	asm("push esi");					// save divisor low
	asm("shrd esi, edi, cl");			// shift divisor right so that msb is bit 31
	asm("mov ebx, edx");				// dividend into ebx:ebp
	asm("mov ebp, eax");
	asm("shrd eax, edx, cl");			// shift dividend right same number of bits
	asm("shr edx, cl");
	asm("cmp edx, esi");				// check if approx quotient will be 2^32
	asm("jae short UDiv64e");			// if so, true result must be 0xFFFFFFFF
	asm("div esi");						// approximate quotient now in eax
	asm("UDiv64f:");
	asm("mov ecx, eax");				// into ecx
	asm("mul edi");						// multiply approx. quotient by divisor high
	asm("mov esi, eax");				// ls dword into esi, ms into edi
	asm("mov edi, edx");
	asm("mov eax, ecx");				// approx. quotient into eax
	asm("mul dword ptr [esp]");			// multiply approx. quotient by divisor low
	asm("add edx, esi");				// edi:edx:eax now equals approx. quotient * divisor
	asm("adc edi, 0");
	asm("xor esi, esi");
	asm("sub ebp, eax");				// subtract dividend - approx. quotient * divisor
	asm("sbb ebx, edx");
	asm("sbb esi, edi");
	asm("jnc short UDiv64c");			// if no borrow, result OK
	asm("dec ecx");						// else result is one too big
	asm("add ebp, [esp]");				// and add divisor to get correct remainder
	asm("adc ebx, [esp+4]");
	asm("UDiv64c:");
	asm("mov eax, ecx");				// result into ebx:eax, remainder into edi:edx
	asm("mov edi, ebx");
	asm("mov edx, ebp");
	asm("xor ebx, ebx");
	asm("add esp, 8");					// remove temporary values from stack
	asm("ret");
	asm("UDiv64b:");
	asm("mov ebx, 1");
	asm("sub eax, esi");				// subtract divisor from dividend
	asm("sbb edx, edi");
	asm("jnc short UDiv64d");			// if no borrow, result=1, remainder in edx:eax
	asm("add eax, esi");				// else add back
	asm("adc edx, edi");
	asm("dec ebx");						// and decrement quotient
	asm("UDiv64d:");
	asm("mov edi, edx");				// remainder into edi:edx
	asm("mov edx, eax");
	asm("mov eax, ebx");				// result in ebx:eax
	asm("xor ebx, ebx");
	asm("ret");
	}
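
// Algorithm sketch (illustrative, not from the original source): when the
// divisor fits in 32 bits, UDiv64 just chains two 32-bit "div" instructions.
// For a divisor of 2^32 or more it normalises the divisor so its most
// significant bit is bit 31, shifts the dividend by the same amount, and uses
// a single 32-bit "div" to form an approximate quotient; thanks to the
// normalisation that estimate exceeds the true quotient by at most one, so a
// single correction step (the "dec ecx" path) suffices. In C terms the
// register contract is roughly:
//
//	// inputs:  edx:eax = dividend, edi:esi = divisor
//	// outputs: ebx:eax = quotient, edi:edx = remainder
//	quotient  = dividend / divisor;
//	remainder = dividend % divisor;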
       
__NAKED__ void _aulldvrm()
//
// Divide two 64 bit unsigned integers, returning a 64 bit result
// and a 64 bit remainder
//
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
//
// Return (dividend / divisor) in edx:eax
// Return (dividend % divisor) in ebx:ecx
//
// The caller removes the arguments from the stack
//
	{
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	asm("mov eax, [esp+16]");
	asm("mov edx, [esp+20]");
	asm("mov esi, [esp+24]");
	asm("mov edi, [esp+28]");
	asm("call %a0": : "i"(&UDiv64));
	asm("mov ecx, edx");				// remainder low into ecx
	asm("mov edx, ebx");				// quotient high into edx
	asm("mov ebx, edi");				// remainder high into ebx
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("ret");
	}
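
// Contract sketch (illustrative, not from the original source): _aulldvrm
// mirrors the MSVC run-time helper of the same name, producing both results of
// an unsigned 64-bit division in a single call:
//
//	// quotient  (dividend / divisor) -> edx:eax
//	// remainder (dividend % divisor) -> ebx:ecx
//
// so all it has to do after UDiv64 is move that routine's output into the
// expected register pairs before restoring the saved registers.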
       
__NAKED__ void _alldvrm()
//
// Divide two 64 bit signed integers, returning a 64 bit result
// and a 64 bit remainder
//
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
//
// Return (dividend / divisor) in edx:eax
// Return (dividend % divisor) in ebx:ecx
//
// The caller removes the arguments from the stack
//
	{
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	asm("mov eax, [esp+16]");
	asm("mov edx, [esp+20]");
	asm("mov esi, [esp+24]");
	asm("mov edi, [esp+28]");
	asm("test edx, edx");
	asm("jns alldrvm_dividend_nonnegative");
	asm("neg edx");						// negate dividend (64-bit negate of edx:eax)
	asm("neg eax");
	asm("sbb edx, 0");
	asm("alldrvm_dividend_nonnegative:");
	asm("test edi, edi");
	asm("jns alldrvm_divisor_nonnegative");
	asm("neg edi");						// negate divisor (64-bit negate of edi:esi)
	asm("neg esi");
	asm("sbb edi, 0");
	asm("alldrvm_divisor_nonnegative:");
	asm("call %a0": : "i"(&UDiv64));
	asm("mov ebp, [esp+20]");			// ebp = dividend high
	asm("mov ecx, edx");				// remainder low into ecx
	asm("xor ebp, [esp+28]");			// sign flag = sign of dividend ^ sign of divisor
	asm("mov edx, ebx");				// quotient high into edx
	asm("mov ebx, edi");				// remainder high into ebx
	asm("jns alldrvm_quotient_nonnegative");
	asm("neg edx");						// negate quotient if operand signs differed
	asm("neg eax");
	asm("sbb edx, 0");
	asm("alldrvm_quotient_nonnegative:");
	asm("cmp dword ptr [esp+20], 0");	// remainder takes the sign of the dividend
	asm("jns alldrvm_rem_nonnegative");
	asm("neg ebx");
	asm("neg ecx");
	asm("sbb ebx, 0");
	asm("alldrvm_rem_nonnegative:");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("ret");
	}
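
// Sign handling note (illustrative, not from the original source): the signed
// variants reduce to UDiv64 on absolute values and then fix the signs to give
// truncation towards zero, i.e. the quotient is negative exactly when the
// operand signs differ and the remainder always takes the sign of the
// dividend. For example, with a = -7 and b = 2:
//
//	a / b == -3		// not -4
//	a % b == -1		// so that (a/b)*b + a%b == a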
       
//__NAKED__ void _aulldiv()
__NAKED__ void __udivdi3 ()
//
// Divide two 64 bit unsigned integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// The caller removes the arguments from the stack
//
	{
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("mov eax, [esp+20]");
	asm("mov edx, [esp+24]");
	asm("mov esi, [esp+28]");
	asm("mov edi, [esp+32]");
	asm("call %a0": : "i"(&UDiv64));
	asm("mov edx, ebx");				// quotient high into edx
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("ret");
	}
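
// Usage sketch (illustrative, not from the original source): __udivdi3 is the
// libgcc entry point GCC emits for 64-bit unsigned division on a 32-bit
// target, so a plain C expression is all that is needed to reach it:
//
//	unsigned long long Scale(unsigned long long aTicks, unsigned long long aHz)
//		{
//		return aTicks / aHz;	// compiles to a call to __udivdi3
//		}
//
// (Scale, aTicks and aHz are hypothetical names used only for this example.)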
       
__NAKED__ void __divdi3()
//
// Divide two 64 bit signed integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// The caller removes the arguments from the stack
//
	{
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("mov eax, [esp+20]");
	asm("mov edx, [esp+24]");
	asm("mov esi, [esp+28]");
	asm("mov edi, [esp+32]");
	asm("test edx, edx");
	asm("jns divdi_dividend_nonnegative");
	asm("neg edx");						// negate dividend (64-bit negate of edx:eax)
	asm("neg eax");
	asm("sbb edx, 0");
	asm("divdi_dividend_nonnegative:");
	asm("test edi, edi");
	asm("jns divdi_divisor_nonnegative");
	asm("neg edi");						// negate divisor (64-bit negate of edi:esi)
	asm("neg esi");
	asm("sbb edi, 0");
	asm("divdi_divisor_nonnegative:");
	asm("call %a0": : "i"(&UDiv64));
	asm("mov ecx, [esp+24]");			// ecx = dividend high
	asm("mov edx, ebx");				// quotient high into edx
	asm("xor ecx, [esp+32]");			// sign flag = sign of dividend ^ sign of divisor
	asm("jns divdi_quotient_nonnegative");
	asm("neg edx");						// negate quotient if operand signs differed
	asm("neg eax");
	asm("sbb edx, 0");
	asm("divdi_quotient_nonnegative:");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("ret");
	}
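
// Convention note (assumption based on the standard ia32 GCC calling
// convention): 64-bit integers are returned in the edx:eax register pair,
// which is why every division helper in this file ends with the quotient (or
// remainder) already sitting in edx:eax before the plain "ret". A C-level
// reference for __divdi3 would simply be:
//
//	long long DivRef(long long aNum, long long aDen)	// hypothetical reference name
//		{
//		return aNum / aDen;		// result leaves in edx:eax on ia32
//		}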
       
__NAKED__ void __umoddi3()
//
// Divide two 64 bit unsigned integers and return 64 bit remainder
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// The caller removes the arguments from the stack
//
	{
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("mov eax, [esp+20]");
	asm("mov edx, [esp+24]");
	asm("mov esi, [esp+28]");
	asm("mov edi, [esp+32]");
	asm("call %a0": : "i"(&UDiv64));
	asm("mov eax, edx");				// remainder low into eax
	asm("mov edx, edi");				// remainder high into edx
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("ret");
	}
       
__NAKED__ void __moddi3()
//
// Divide two 64 bit signed integers and return 64 bit remainder
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// The caller removes the arguments from the stack
//
	{
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("mov eax, [esp+20]");
	asm("mov edx, [esp+24]");
	asm("mov esi, [esp+28]");
	asm("mov edi, [esp+32]");
	asm("test edx, edx");
	asm("jns dividend_nonnegative");
	asm("neg edx");						// negate dividend (64-bit negate of edx:eax)
	asm("neg eax");
	asm("sbb edx, 0");
	asm("dividend_nonnegative:");
	asm("test edi, edi");
	asm("jns divisor_nonnegative");
	asm("neg edi");						// negate divisor (64-bit negate of edi:esi)
	asm("neg esi");
	asm("sbb edi, 0");
	asm("divisor_nonnegative:");
	asm("call %a0": : "i"(&UDiv64));
	asm("mov eax, edx");				// remainder low into eax
	asm("mov edx, edi");				// remainder high into edx
	asm("cmp dword ptr [esp+24], 0");	// remainder takes the sign of the dividend
	asm("jns rem_nonnegative");
	asm("neg edx");
	asm("neg eax");
	asm("sbb edx, 0");
	asm("rem_nonnegative:");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("ret");
	}
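
// Remainder semantics sketch (illustrative, not from the original source):
// __umoddi3 and __moddi3 are the libgcc helpers behind the C "%" operator on
// 64-bit operands, and __moddi3 gives the remainder the sign of the dividend
// so that the usual C identity holds:
//
//	long long a = -9, b = 4;
//	a % b == -1;				// remainder follows the dividend's sign
//	(a / b) * b + a % b == a;	// -2*4 + -1 == -9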
       
__NAKED__ void _allshr()
//
// Arithmetic shift right EDX:EAX by CL
//
	{
	asm("cmp cl, 64");
	asm("jae asr_count_ge_64");
	asm("cmp cl, 32");
	asm("jae asr_count_ge_32");
	asm("shrd eax, edx, cl");
	asm("sar edx, cl");
	asm("ret");
	asm("asr_count_ge_32:");
	asm("sub cl, 32");
	asm("mov eax, edx");
	asm("cdq");
	asm("sar eax, cl");
	asm("ret");
       
	asm("asr_count_ge_64:");
	asm("sar edx, 31");					// fill both halves with the sign bit (x86 masks
	asm("mov eax, edx");				// shift counts to 5 bits, so a count of 32 here
	asm("ret");							// would shift by zero, not 32)
	}
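
// Behaviour sketch (illustrative, not from the original source): _allshr
// implements ">>" on signed 64-bit values with the count in cl, handling the
// three count ranges separately:
//
//	long long v = -0x100000000LL;	// 0xFFFFFFFF00000000
//	v >> 4;		// 0xFFFFFFFFF0000000: shrd/sar path, count < 32
//	v >> 36;	// 0xFFFFFFFFFFFFFFFF (-1): cdq path, 32 <= count < 64
//
// Counts of 64 or more are undefined in C; the code simply returns the sign
// extension in both halves.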
       
__NAKED__ void _allshl()
//
// Shift left EDX:EAX by CL
//
	{
	asm("cmp cl, 64");
	asm("jae lsl_count_ge_64");
	asm("cmp cl, 32");
	asm("jae lsl_count_ge_32");
	asm("shld edx, eax, cl");
	asm("shl eax, cl");
	asm("ret");
	asm("lsl_count_ge_32:");
	asm("sub cl, 32");
	asm("mov edx, eax");
	asm("xor eax, eax");
	asm("shl edx, cl");
	asm("ret");
	asm("lsl_count_ge_64:");
	asm("xor edx, edx");
	asm("xor eax, eax");
	asm("ret");
	}
       
__NAKED__ void _aullshr()
//
// Logical shift right EDX:EAX by CL
//
	{
	asm("cmp cl, 64");
	asm("jae lsr_count_ge_64");
	asm("cmp cl, 32");
	asm("jae lsr_count_ge_32");
	asm("shrd eax, edx, cl");
	asm("shr edx, cl");
	asm("ret");
	asm("lsr_count_ge_32:");
	asm("sub cl, 32");
	asm("mov eax, edx");
	asm("xor edx, edx");
	asm("shr eax, cl");
	asm("ret");
	asm("lsr_count_ge_64:");
	asm("xor edx, edx");
	asm("xor eax, eax");
	asm("ret");
	}
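
// Contrast with _allshr (illustrative, not from the original source): the
// unsigned helpers fill vacated bits with zeros instead of the sign bit, and
// _allshl needs no signed variant because shifting left produces the same bit
// pattern for both:
//
//	unsigned long long u = 0xFFFFFFFF00000000ULL;
//	u >> 36;	// 0x000000000FFFFFFFULL: zero fill, unlike the signed case above
//	u << 8;		// 0xFFFFFF0000000000ULL: same result _allshl gives for signed values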
       
}	// extern "C"