// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\x86\atomic_skeleton.h
// 
//

/** Read an 8/16/32 bit quantity with acquire semantics

	@param	a	Address of data to be read - must be naturally aligned
	@return		The value read
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_load_acq)(const volatile TAny* /*a*/)
	{
	_asm mov ecx, [esp+4]					// ecx = a
	_asm mov __A_REG__, [ecx]				// return value = *a
#ifdef __BARRIERS_NEEDED__
	_asm lock add dword ptr [esp], 0		// locked no-op RMW on the stack acts as a full memory barrier
#endif
	_asm ret
	}
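
/* Usage sketch (illustrative only, not part of the skeleton): assuming __fname__ appends
   the operand width so that the 32-bit instance is exported as __e32_atomic_load_acq32(),
   a consumer can spin on a flag and rely on acquire ordering to see the producer's
   earlier writes:

	volatile TUint32 ready = 0;				// shared flag (declared here for the sketch)
	// consumer thread
	while (!__e32_atomic_load_acq32(&ready))
		{}									// once ready!=0, data published before the flag store is visible
*/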


/** Write an 8/16/32 bit quantity with release semantics

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The value written
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_store_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]					// ecx = a
	_asm mov __D_REG__, [esp+8]				// __D_REG__ = v
	_asm mov __A_REG__, __D_REG__			// return value = v
	_asm __LOCK__ xchg [ecx], __D_REG__		// xchg with a memory operand is implicitly locked and is a full barrier
	_asm ret
	}
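
/* Usage sketch (illustrative only): the matching producer publishes its data and then sets
   the flag with release semantics, assuming the width-suffixed name __e32_atomic_store_rel32()
   (sharedData, ComputeResult and ready are placeholders):

	sharedData = ComputeResult();			// ordinary writes
	__e32_atomic_store_rel32(&ready, 1);	// release: prior writes become visible before the flag
*/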


/** Write an 8/16/32 bit quantity with full barrier semantics

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The value written
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_store_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_store_rel)
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Relaxed ordering.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_swp_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_swp_ord)
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Acquire semantics.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_swp_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_swp_ord)
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Release semantics.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_swp_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_swp_ord)
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Full barrier semantics.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_swp_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]					// ecx = a
	_asm mov __A_REG__, [esp+8]				// __A_REG__ = v
	_asm __LOCK__ xchg [ecx], __A_REG__		// atomically swap; original *a is returned in __A_REG__
	_asm ret
	}
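
/* Usage sketch (illustrative only): an atomic swap is a natural fit for handing over
   ownership of a single word, e.g. draining a "pending work" cell (32-bit instance name
   assumed to be __e32_atomic_swp_ord32(); iPending and ProcessWork are placeholders):

	TUint32 work = __e32_atomic_swp_ord32(&iPending, 0);	// take whatever was queued, leave 0
	if (work)
		ProcessWork(work);
*/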


/** 8/16/32 bit compare and swap, relaxed ordering.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool		__fname__(__e32_atomic_cas_rlx)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_cas_ord)
	}


/** 8/16/32 bit compare and swap, acquire semantics.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool		__fname__(__e32_atomic_cas_acq)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_cas_ord)
	}


/** 8/16/32 bit compare and swap, release semantics.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool		__fname__(__e32_atomic_cas_rel)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_cas_ord)
	}


/** 8/16/32 bit compare and swap, full barrier semantics.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool		__fname__(__e32_atomic_cas_ord)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]					// ecx = a
	_asm mov eax, [esp+8]					// eax = q
	_asm mov __D_REG__, [esp+12]			// __D_REG__ = v
	_asm mov __A_REG__, [eax]				// accumulator = expected value *q
	_asm __LOCK__ cmpxchg [ecx], __D_REG__	// if *a equals the accumulator, *a = v; else accumulator = *a
	_asm jne short cas_fail
	_asm mov eax, 1							// success: return TRUE
	_asm ret
	_asm cas_fail:
	_asm mov edx, [esp+8]					// reload q (eax was clobbered above)
	_asm mov [edx], __A_REG__				// *q = value actually observed in *a
	_asm xor eax, eax						// failure: return FALSE
	_asm ret
	}
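
/* Usage sketch (illustrative only): compare-and-swap is the building block for arbitrary
   read-modify-write loops. For example, an "atomic maximum" could be written against the
   assumed 32-bit instances (iMax and value are placeholders):

	TUint32 old = __e32_atomic_load_acq32(&iMax);
	while (value > old && !__e32_atomic_cas_ord32(&iMax, &old, value))
		{}			// on failure, old is refreshed with the current *a, so the loop re-tests it
*/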


/** 8/16/32 bit atomic add, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_add_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_add_ord)
	}


/** 8/16/32 bit atomic add, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_add_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_add_ord)
	}


/** 8/16/32 bit atomic add, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_add_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_add_ord)
	}


/** 8/16/32 bit atomic add, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_add_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]					// ecx = a
	_asm mov __A_REG__, [esp+8]				// __A_REG__ = v
	_asm __LOCK__ xadd [ecx], __A_REG__		// *a += v; __A_REG__ receives the original *a
	_asm ret
	}
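
/* Usage sketch (illustrative only): xadd makes reference counting a single locked instruction.
   Assuming the 32-bit instance __e32_atomic_add_ord32() and a placeholder iRefCount member,
   a release path might look like:

	if (__e32_atomic_add_ord32(&iRefCount, (TUint32)-1) == 1)	// returns the old count
		delete this;											// we dropped the last reference
*/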


/** 8/16/32 bit atomic bitwise logical AND, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_and_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_and_ord)
	}


/** 8/16/32 bit atomic bitwise logical AND, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_and_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_and_ord)
	}


/** 8/16/32 bit atomic bitwise logical AND, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_and_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_and_ord)
	}


/** 8/16/32 bit atomic bitwise logical AND, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_and_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]					// ecx = a
	_asm mov __A_REG__, [ecx]				// accumulator = current *a (oldv)
	_asm retry:
	_asm mov __D_REG__, [esp+8]				// __D_REG__ = v
	_asm and __D_REG__, __A_REG__			// __D_REG__ = oldv & v
	_asm __LOCK__ cmpxchg [ecx], __D_REG__	// store if *a still equals oldv, else reload oldv and retry
	_asm jne short retry
	_asm ret								// old value returned in __A_REG__
	}
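
/* Usage sketch (illustrative only): atomic AND is typically used to clear flag bits and then
   test what was set beforehand, assuming the 32-bit instance __e32_atomic_and_ord32()
   (iFlags is a placeholder):

	const TUint32 KFlagBusy = 0x01;								// placeholder flag value
	TUint32 old = __e32_atomic_and_ord32(&iFlags, ~KFlagBusy);	// clear the bit
	TBool wasBusy = (old & KFlagBusy) != 0;						// and see whether it was set
*/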


/** 8/16/32 bit atomic bitwise logical inclusive OR, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_ior_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_ior_ord)
	}


/** 8/16/32 bit atomic bitwise logical inclusive OR, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_ior_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_ior_ord)
	}


/** 8/16/32 bit atomic bitwise logical inclusive OR, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_ior_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_ior_ord)
	}


/** 8/16/32 bit atomic bitwise logical inclusive OR, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_ior_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	_asm retry:
	_asm mov __D_REG__, [esp+8]
	_asm or __D_REG__, __A_REG__
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
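
/* Usage sketch (illustrative only): atomic OR sets flag bits; the returned old value tells the
   caller whether it was the one that set them (32-bit instance name assumed; iFlags and
   KFlagBusy are placeholders):

	TUint32 old = __e32_atomic_ior_ord32(&iFlags, KFlagBusy);	// set the bit
	TBool firstSetter = (old & KFlagBusy) == 0;					// TRUE if it was previously clear
*/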


/** 8/16/32 bit atomic bitwise logical exclusive OR, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_xor_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_xor_ord)
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_xor_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_xor_ord)
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_xor_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_xor_ord)
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_xor_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	_asm retry:
	_asm mov __D_REG__, [esp+8]
	_asm xor __D_REG__, __A_REG__
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
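
/* Usage sketch (illustrative only): atomic XOR toggles bits, e.g. flipping between two buffer
   indices kept in a single word (32-bit instance name assumed; iWhichBuffer is a placeholder):

	TUint32 old = __e32_atomic_xor_ord32(&iWhichBuffer, 1);		// toggle between 0 and 1
	TUint32 previous = old & 1;									// index that was active before
*/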


/** 8/16/32 bit atomic bitwise universal function, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_axo_rlx)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_axo_ord)
	}


/** 8/16/32 bit atomic bitwise universal function, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_axo_acq)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_axo_ord)
	}


/** 8/16/32 bit atomic bitwise universal function, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_axo_rel)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_axo_ord)
	}


/** 8/16/32 bit atomic bitwise universal function, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_axo_ord)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	_asm retry:
	_asm mov __D_REG__, [esp+8]
	_asm and __D_REG__, __A_REG__
	_asm xor __D_REG__, [esp+12]
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
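
/* The "universal" form (oldv & u) ^ v subsumes the simpler bitwise operations. For a mask m
   and the assumed 32-bit instance __e32_atomic_axo_ord32() (a and m are placeholders):

	__e32_atomic_axo_ord32(a, m,   0);		// AND with m:  (oldv & m) ^ 0
	__e32_atomic_axo_ord32(a, ~m,  m);		// OR with m:   (oldv & ~m) ^ m == oldv | m
	__e32_atomic_axo_ord32(a, ~0u, m);		// XOR with m:  (oldv & ~0) ^ m
*/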


/** 8/16/32 bit threshold and add, unsigned, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_tau_rlx)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tau_ord)
	}


/** 8/16/32 bit threshold and add, unsigned, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_tau_acq)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tau_ord)
	}


/** 8/16/32 bit threshold and add, unsigned, release semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_tau_rel)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tau_ord)
	}


/** 8/16/32 bit threshold and add, unsigned, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_tau_ord)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]					// ecx = a
	_asm mov __A_REG__, [ecx]				// accumulator = current *a (oldv)
	_asm retry:
	_asm mov __D_REG__, [esp+12]			// assume oldv >= t: increment = u
	_asm cmp __A_REG__, [esp+8]				// unsigned compare of oldv against t
	_asm jae short use_u
	_asm mov __D_REG__, [esp+16]			// oldv < t: increment = v
	_asm use_u:
	_asm add __D_REG__, __A_REG__			// __D_REG__ = oldv + increment
	_asm __LOCK__ cmpxchg [ecx], __D_REG__	// store if *a still equals oldv, else reload oldv and retry
	_asm jne short retry
	_asm ret								// old value returned in __A_REG__
	}
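
/* Usage sketch (illustrative only): the threshold-and-add form can express a saturating
   decrement such as "take a token if one is available" (32-bit instance name assumed;
   iTokens is a placeholder):

	// if iTokens >= 1 subtract 1, otherwise add 0 (leave it alone); returns the old count
	TUint32 old = __e32_atomic_tau_ord32(&iTokens, 1, (TUint32)-1, 0);
	TBool gotToken = old != 0;
*/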


/** 8/16/32 bit threshold and add, signed, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__	__fname__(__e32_atomic_tas_rlx)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tas_ord)
	}


/** 8/16/32 bit threshold and add, signed, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__	__fname__(__e32_atomic_tas_acq)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tas_ord)
	}


/** 8/16/32 bit threshold and add, signed, release semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__	__fname__(__e32_atomic_tas_rel)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tas_ord)
	}


/** 8/16/32 bit threshold and add, signed, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__	__fname__(__e32_atomic_tas_ord)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	_asm mov ecx, [esp+4]					// ecx = a
	_asm mov __A_REG__, [ecx]				// accumulator = current *a (oldv)
	_asm retry:
	_asm mov __D_REG__, [esp+12]			// assume oldv >= t: increment = u
	_asm cmp __A_REG__, [esp+8]				// signed compare of oldv against t
	_asm jge short use_u
	_asm mov __D_REG__, [esp+16]			// oldv < t: increment = v
	_asm use_u:
	_asm add __D_REG__, __A_REG__			// __D_REG__ = oldv + increment
	_asm __LOCK__ cmpxchg [ecx], __D_REG__	// store if *a still equals oldv, else reload oldv and retry
	_asm jne short retry
	_asm ret								// old value returned in __A_REG__
	}
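
/* Usage sketch (illustrative only): the signed variant uses a signed compare (jge above), so
   negative thresholds behave as expected. A bounded increment might look like this, assuming
   the 32-bit instance __e32_atomic_tas_ord32() (iCount and KLimit are placeholders):

	// increment only while below KLimit: if old >= KLimit add 0, else add 1
	TInt32 old = __e32_atomic_tas_ord32(&iCount, KLimit, 0, 1);
	TBool incremented = old < KLimit;
*/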