kernel/eka/common/x86/atomic_skeleton.h
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\x86\atomic_skeleton.h
//
//

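/* Note on this skeleton (editorial addition; a best-effort reading of the
   placeholder macros, not documented fact): the file is evidently #included
   once per operand size by the x86 atomics implementation, with __TUintX__,
   __TIntX__, __fname__, __redir__, __A_REG__, __D_REG__ and __LOCK__ bound
   first. A plausible 32-bit binding would look something like:

	#define __TUintX__		TUint32
	#define __TIntX__		TInt32
	#define __fname__(x)	x##32				// __e32_atomic_load_acq -> __e32_atomic_load_acq32
	#define __redir__(x)	asm("jmp _" #x "32")	// tail-call the _ord variant
	#define __A_REG__		"eax"				// accumulator register for this operand size
	#define __D_REG__		"edx"				// scratch/data register for this operand size
	#define __LOCK__		"lock "				// presumably empty on uniprocessor builds
	#include "atomic_skeleton.h"

   __BARRIERS_NEEDED__ is presumably defined only for SMP targets, where the
   lock prefixes and fences are actually required for cross-CPU ordering. */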
       
/**
 Read an 8/16/32 bit quantity with acquire semantics

 @param	a	Address of data to be read - must be naturally aligned
 @return		The value read
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_load_acq)(const volatile TAny* /*a*/)
	{
	asm("mov ecx, [esp+4] ");				// ecx = a
	asm("mov " __A_REG__ ", [ecx] ");		// return value = *a (an aligned x86 load is atomic)
#ifdef __BARRIERS_NEEDED__
	asm("lock add dword ptr [esp], 0 ");	// locked no-op RMW acts as a full fence, cheaper than mfence
#endif
	asm("ret ");
	}
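/* Illustrative usage (editorial addition, not part of the skeleton; assumes
   the 32-bit instantiations __e32_atomic_load_acq32/__e32_atomic_store_rel32
   generated via __fname__). The acquire load pairs with the release store
   defined just below, giving the classic flag/payload hand-off:

	volatile TUint32 gReady = 0;
	TUint32 gPayload;

	void Publish(TUint32 aValue)
		{
		gPayload = aValue;						// plain store of the data
		__e32_atomic_store_rel32(&gReady, 1);	// release: payload visible before the flag
		}

	TBool TryConsume(TUint32& aOut)
		{
		if (!__e32_atomic_load_acq32(&gReady))	// acquire: pairs with the release store
			return EFalse;
		aOut = gPayload;						// ordering guarantees this sees aValue
		return ETrue;
		}
*/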
       

/** Write an 8/16/32 bit quantity with release semantics

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The value written
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_store_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");				// ecx = a
	asm("mov " __D_REG__ ", [esp+8] ");		// d = v
	asm("mov " __A_REG__ ", " __D_REG__ );	// return value = v
	asm(__LOCK__ "xchg [ecx], " __D_REG__ );	// xchg with a memory operand is implicitly locked
	asm("ret ");
	}
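/* Note: XCHG with a memory operand asserts the bus lock implicitly, so the
   __LOCK__ prefix above is belt-and-braces. A plain x86 store already has
   release ordering; the exchange is used because __e32_atomic_store_ord
   below redirects here, so this one routine must provide the stronger
   full-barrier ordering. Typical use (editorial sketch, assuming the 32-bit
   instantiation):

	void SpinUnlock(volatile TUint32& aLock)
		{
		// stores inside the critical section become visible before the lock clears
		__e32_atomic_store_rel32(&aLock, 0);
		}
*/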
       

/** Write an 8/16/32 bit quantity with full barrier semantics

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The value written
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_store_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_store_rel);
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Relaxed ordering.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_swp_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_swp_ord);
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Acquire semantics.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_swp_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_swp_ord);
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Release semantics.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_swp_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_swp_ord);
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Full barrier semantics.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_swp_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");			// ecx = a
	asm("mov " __A_REG__ ", [esp+8] ");	// a-reg = v
	asm(__LOCK__ "xchg [ecx], " __A_REG__ );	// atomic swap; old value of *a is returned
	asm("ret ");
	}
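/* Illustrative usage (editorial addition, assuming the 32-bit instantiation):
   atomic exchange is the classic test-and-set spinlock primitive, pairing
   with the SpinUnlock sketch above:

	void SpinLock(volatile TUint32& aLock)
		{
		while (__e32_atomic_swp_acq32(&aLock, 1))
			{}	// old value 1 means already held; spin until our 1 is the first one in
		}
*/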
       

/** 8/16/32 bit compare and swap, relaxed ordering.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool		__fname__(__e32_atomic_cas_rlx)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_cas_ord);
	}


/** 8/16/32 bit compare and swap, acquire semantics.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool		__fname__(__e32_atomic_cas_acq)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_cas_ord);
	}


/** 8/16/32 bit compare and swap, release semantics.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool		__fname__(__e32_atomic_cas_rel)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_cas_ord);
	}


/** 8/16/32 bit compare and swap, full barrier semantics.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool		__fname__(__e32_atomic_cas_ord)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");			// ecx = a
	asm("mov eax, [esp+8] ");			// eax = q
	asm("mov " __D_REG__ ", [esp+12] ");	// d = v (new value)
	asm("mov " __A_REG__ ", [eax] ");	// a-reg = *q (expected value)
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );	// if (*a==expected) *a=v; else a-reg=*a
	asm("jne short 2f ");
	asm("mov eax, 1 ");					// success: return TRUE
	asm("ret ");
	asm("2: ");
	asm("mov edx, [esp+8] ");			// failure: edx = q
	asm("mov [edx], " __A_REG__ );		// *q = observed value of *a
	asm("xor eax, eax ");				// return FALSE
	asm("ret ");
	}
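/* Illustrative usage (editorial addition, assuming the 32-bit instantiation).
   Note that the failure path writes the observed value back through q, just
   like C++11 compare_exchange, which makes retry loops cheap - no separate
   reload is needed:

	TUint32 AtomicMax32(volatile TUint32& aVal, TUint32 aCandidate)
		{
		TUint32 old = __e32_atomic_load_acq32(&aVal);
		while (old < aCandidate && !__e32_atomic_cas_ord32(&aVal, &old, aCandidate))
			{}	// failed CAS refreshed old; re-test and retry
		return old;
		}
*/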
       

/** 8/16/32 bit atomic add, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_add_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_add_ord);
	}


/** 8/16/32 bit atomic add, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_add_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_add_ord);
	}


/** 8/16/32 bit atomic add, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_add_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_add_ord);
	}


/** 8/16/32 bit atomic add, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_add_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");			// ecx = a
	asm("mov " __A_REG__ ", [esp+8] ");	// a-reg = v
	asm(__LOCK__ "xadd [ecx], " __A_REG__ );	// *a += v; old value returned in a-reg
	asm("ret ");
	}
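/* Illustrative usage (editorial addition, assuming the 32-bit instantiation).
   XADD returns the pre-increment value in one locked instruction, so unlike
   the bitwise operations below, no retry loop is needed:

	TUint32 NextTicket(volatile TUint32& aCounter)
		{
		return __e32_atomic_add_ord32(&aCounter, 1);	// old value = this caller's ticket
		}
*/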
       

/** 8/16/32 bit atomic bitwise logical AND, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_and_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_and_ord);
	}


/** 8/16/32 bit atomic bitwise logical AND, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_and_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_and_ord);
	}


/** 8/16/32 bit atomic bitwise logical AND, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_and_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_and_ord);
	}


/** 8/16/32 bit atomic bitwise logical AND, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_and_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");			// ecx = a
	asm("mov " __A_REG__ ", [ecx] ");	// a-reg = current value of *a
	asm("1: ");
	asm("mov " __D_REG__ ", [esp+8] ");	// d = v
	asm("and " __D_REG__ ", " __A_REG__ );	// d = old & v
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );	// commit only if *a still equals old
	asm("jne short 1b ");				// on failure a-reg holds the fresh *a; retry
	asm("ret ");						// return the original value
	}
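/* The loop above is the standard x86 pattern for a read-modify-write that has
   no single fetch-and-op instruction: CMPXCHG reloads the accumulator with
   the fresh value on failure, so only the computation is repeated. The
   ior/xor/axo routines below follow the same shape. A C equivalent of
   and_ord (editorial sketch, 32-bit names assumed):

	TUint32 FetchAnd32(volatile TUint32& aData, TUint32 aMask)
		{
		TUint32 old = aData;
		while (!__e32_atomic_cas_ord32(&aData, &old, old & aMask))
			{}	// old has been refreshed by the failed CAS; recompute and retry
		return old;
		}
*/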
       

/** 8/16/32 bit atomic bitwise logical inclusive OR, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_ior_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_ior_ord);
	}


/** 8/16/32 bit atomic bitwise logical inclusive OR, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_ior_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_ior_ord);
	}


/** 8/16/32 bit atomic bitwise logical inclusive OR, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_ior_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_ior_ord);
	}


/** 8/16/32 bit atomic bitwise logical inclusive OR, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_ior_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov " __A_REG__ ", [ecx] ");
	asm("1: ");
	asm("mov " __D_REG__ ", [esp+8] ");
	asm("or " __D_REG__ ", " __A_REG__ );
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );
	asm("jne short 1b ");
	asm("ret ");
	}
       

/** 8/16/32 bit atomic bitwise logical exclusive OR, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_xor_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_xor_ord);
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_xor_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_xor_ord);
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_xor_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_xor_ord);
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_xor_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov " __A_REG__ ", [ecx] ");
	asm("1: ");
	asm("mov " __D_REG__ ", [esp+8] ");
	asm("xor " __D_REG__ ", " __A_REG__ );
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );
	asm("jne short 1b ");
	asm("ret ");
	}
       

/** 8/16/32 bit atomic bitwise universal function, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_axo_rlx)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_axo_ord);
	}


/** 8/16/32 bit atomic bitwise universal function, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_axo_acq)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_axo_ord);
	}


/** 8/16/32 bit atomic bitwise universal function, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_axo_rel)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_axo_ord);
	}


/** 8/16/32 bit atomic bitwise universal function, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_axo_ord)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");			// ecx = a
	asm("mov " __A_REG__ ", [ecx] ");	// a-reg = current value of *a
	asm("1: ");
	asm("mov " __D_REG__ ", [esp+8] ");	// d = u
	asm("and " __D_REG__ ", " __A_REG__ );	// d = old & u
	asm("xor " __D_REG__ ", [esp+12] ");	// d = (old & u) ^ v
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );	// commit only if *a still equals old
	asm("jne short 1b ");				// on failure a-reg holds the fresh *a; retry
	asm("ret ");
	}
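/* axo is the universal two-mask bitwise operation; the simpler routines are
   special cases of it:

	and(a, m) == axo(a, m,  0)
	ior(a, m) == axo(a, ~m, m)
	xor(a, m) == axo(a, ~0, m)
	swp(a, v) == axo(a, 0,  v)

   One thing the simple routines cannot do in a single atomic step is clear
   one set of flag bits while setting another (editorial sketch, 32-bit names
   assumed):

	TUint32 UpdateFlags32(volatile TUint32& aFlags, TUint32 aClear, TUint32 aSet)
		{
		// new = (old & ~(aClear|aSet)) ^ aSet: aClear bits -> 0, aSet bits -> 1, rest unchanged
		return __e32_atomic_axo_ord32(&aFlags, ~(aClear|aSet), aSet);
		}
*/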
       

/** 8/16/32 bit threshold and add, unsigned, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_tau_rlx)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_tau_ord);
	}


/** 8/16/32 bit threshold and add, unsigned, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_tau_acq)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_tau_ord);
	}


/** 8/16/32 bit threshold and add, unsigned, release semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_tau_rel)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_tau_ord);
	}


/** 8/16/32 bit threshold and add, unsigned, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_tau_ord)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");			// ecx = a
	asm("mov " __A_REG__ ", [ecx] ");	// a-reg = current value of *a
	asm("1: ");
	asm("mov " __D_REG__ ", [esp+12] ");	// d = u
	asm("cmp " __A_REG__ ", [esp+8] ");	// compare old value with t (unsigned)
	asm("jae short 2f ");				// old >= t: add u
	asm("mov " __D_REG__ ", [esp+16] ");	// old < t: add v instead
	asm("2: ");
	asm("add " __D_REG__ ", " __A_REG__ );	// d = old + (u or v)
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );	// commit only if *a still equals old
	asm("jne short 1b ");				// on failure a-reg holds the fresh *a; retry
	asm("ret ");
	}
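/* Threshold-and-add is a conditional add selected by an unsigned compare; an
   obvious client is a counting semaphore (editorial sketch, 32-bit names
   assumed): take a token only if one is available, never driving the count
   below zero.

	TBool TryWait(volatile TUint32& aCount)
		{
		// old >= 1: add (TUint32)-1, i.e. decrement; old < 1: add 0, i.e. leave alone
		TUint32 old = __e32_atomic_tau_ord32(&aCount, 1, (TUint32)-1, 0);
		return old != 0;	// nonzero means a token was taken
		}
*/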
       

/** 8/16/32 bit threshold and add, signed, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__	__fname__(__e32_atomic_tas_rlx)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	__redir__(__e32_atomic_tas_ord);
	}


/** 8/16/32 bit threshold and add, signed, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__	__fname__(__e32_atomic_tas_acq)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	__redir__(__e32_atomic_tas_ord);
	}


/** 8/16/32 bit threshold and add, signed, release semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__	__fname__(__e32_atomic_tas_rel)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	__redir__(__e32_atomic_tas_ord);
	}


/** 8/16/32 bit threshold and add, signed, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__	__fname__(__e32_atomic_tas_ord)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");			// ecx = a
	asm("mov " __A_REG__ ", [ecx] ");	// a-reg = current value of *a
	asm("1: ");
	asm("mov " __D_REG__ ", [esp+12] ");	// d = u
	asm("cmp " __A_REG__ ", [esp+8] ");	// compare old value with t (signed)
	asm("jge short 2f ");				// old >= t: add u
	asm("mov " __D_REG__ ", [esp+16] ");	// old < t: add v instead
	asm("2: ");
	asm("add " __D_REG__ ", " __A_REG__ );	// d = old + (u or v)
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );	// commit only if *a still equals old
	asm("jne short 1b ");				// on failure a-reg holds the fresh *a; retry
	asm("ret ");
	}
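/* The signed variant differs from tau_ord only in using JGE (signed) rather
   than JAE (unsigned) for the threshold test. Editorial sketch, 32-bit names
   assumed: increment a signed level, saturating at a cap.

	TInt32 SatIncrement32(volatile TInt32& aLevel, TInt32 aCap)
		{
		// old >= aCap: add 0 (saturate); old < aCap: add 1
		return __e32_atomic_tas_ord32(&aLevel, aCap, 0, 1);
		}
*/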
       