/*
* Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
* All rights reserved.
* This component and the accompanying materials are made available
* under the terms of the License "Eclipse Public License v1.0"
* which accompanies this distribution, and is available
* at the URL "http://www.eclipse.org/legal/epl-v10.html".
*
* Initial Contributors:
* Nokia Corporation - initial contribution.
*
* Contributors:
*
* Description:
* e32/include/e32atomics.h
*
*
*/


#ifndef __E32ATOMICS_H__
#define __E32ATOMICS_H__
#include <e32def.h>

/** @file e32atomics.h
    @publishedAll
    @prototype

    General purpose atomic operations and utility functions.
    All functions in this header are available on both user and kernel side.

    Atomic operations:
    __e32_atomic_xxx_yyy8()    should be used for 8 bit atomic variables
    __e32_atomic_xxx_yyy16()   should be used for 16 bit atomic variables
    __e32_atomic_xxx_yyy32()   should be used for 32 bit atomic variables
    __e32_atomic_xxx_yyy64()   should be used for 64 bit atomic variables
    __e32_atomic_xxx_yyy_ptr() should be used for atomic updates to pointers

    xxx specifies the operation performed:
    load   read memory atomically
    store  write memory atomically
    swp    write to a memory location and return the original value of the
           memory location
    add    add a value to a memory location and return the original value
           of the memory location
    and    bitwise AND a value with a memory location and return the
           original value of the memory location
    ior    bitwise OR a value with a memory location and return the
           original value of the memory location
    xor    bitwise XOR a value with a memory location and return the
           original value of the memory location
    axo    atomic { orig_v = *p; *p = (orig_v & u) ^ v; } return orig_v;
    cas    if the value of a memory location matches a specified expected
           value, write a specified new value and return TRUE, otherwise
           update the expected value with the actual value seen and return
           FALSE (a retry-loop sketch follows this list)
    tau    if the value of a memory location is >= a specified threshold,
           considered as an unsigned integer, add a specified value to it,
           otherwise add a different specified value to it; return the
           original value of the memory location
    tas    if the value of a memory location is >= a specified threshold,
           considered as a signed integer, add a specified value to it,
           otherwise add a different specified value to it; return the
           original value of the memory location
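
    For example, the cas retry idiom can build composite atomic updates out of a
    read-modify-write cycle. A minimal sketch of an "atomic maximum" (AtomicMax32
    is a hypothetical helper, not part of this API):

        TUint32 AtomicMax32(volatile TUint32* aLoc, TUint32 aCandidate)
            {
            TUint32 seen = __e32_atomic_load_acq32(aLoc);
            while (seen < aCandidate && !__e32_atomic_cas_ord32(aLoc, &seen, aCandidate))
                {
                }
            return seen;
            }

    When the cas fails it writes the value actually observed back into seen, so the
    loop re-tests against fresh data without an explicit re-read.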

    yyy specifies the memory ordering (a release/acquire sketch follows this list):
    rlx = relaxed memory ordering
          there is no guarantee on the order in which the atomic operation
          is observed relative to preceding or following memory accesses
    acq = acquire semantics
          the atomic operation is guaranteed to be observed before any
          following memory accesses
    rel = release semantics
          the atomic operation is guaranteed to be observed after any
          preceding memory accesses
    ord = fully ordered
          the atomic operation is guaranteed to be observed after any
          preceding memory accesses and before any following memory
          accesses
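
    For example, a release store pairs with an acquire load to publish data written
    with ordinary stores (a minimal sketch; gPayload, gReady, MakePayload and
    UsePayload are hypothetical names):

        writer:
            gPayload = MakePayload();
            __e32_atomic_store_rel32(&gReady, 1);

        reader:
            while (!__e32_atomic_load_acq32(&gReady))
                {
                }
            UsePayload(gPayload);

    The release store is observed after the preceding write to gPayload, and the
    acquire load is observed before the following read of gPayload, so the reader
    never sees gReady set while gPayload is still stale.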

    Note that these operations should only be used on normal memory regions
    since they are implemented in terms of LDREX/STREX and so multiple reads
    can occur before the operation completes. Also __e32_atomic_load_yyy64()
    can't be used on read-only memory regions since it uses LDREXD/STREXD to
    guarantee atomicity.
    Atomic operations may only be used on naturally aligned memory (i.e. *16()
    operations on an even address, *32() operations on an address which is a
    multiple of 4 and *64() operations on an address which is a multiple of 8).
    This applies even if you have (unwisely) decided to turn off alignment
    checking.

    Barrier operations:
    Two barrier functions are provided (a usage sketch follows):
    __e32_memory_barrier() - this ensures all preceding explicit memory accesses
          are observed before any following explicit memory accesses.
          Equates to the ARM DMB instruction.
    __e32_io_completion_barrier() - this ensures all preceding explicit memory
          accesses complete before any following instructions execute.
          For example, it ensures that writes to I/O devices have actually
          occurred before execution continues.
          Equates to the ARM DSB instruction.
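
    For example, when data is handed over through an ordinary flag rather than an
    atomic store with release semantics, __e32_memory_barrier() keeps the writes
    in order (a minimal sketch; gRequest and gRequestPosted are hypothetical):

        gRequest.iAddr = addr;
        gRequest.iLength = len;
        __e32_memory_barrier();
        gRequestPosted = 1;

    Without the barrier the store to gRequestPosted could be observed before the
    stores to gRequest. The observing side needs a matching __e32_memory_barrier()
    (or an acquire load of the flag) between reading gRequestPosted and reading
    gRequest.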

    Utility functions:
    __e32_find_ms1_32   Return bit position of most significant 1 in a 32 bit
                        argument, or -1 if the argument is zero.
    __e32_find_ls1_32   Return bit position of least significant 1 in a 32 bit
                        argument, or -1 if the argument is zero.
    __e32_bit_count_32  Return the count of bits set to 1 in a 32 bit argument.
    __e32_find_ms1_64   Return bit position of most significant 1 in a 64 bit
                        argument, or -1 if the argument is zero.
    __e32_find_ls1_64   Return bit position of least significant 1 in a 64 bit
                        argument, or -1 if the argument is zero.
    __e32_bit_count_64  Return the count of bits set to 1 in a 64 bit argument.

*/


/*
Versions needed:
    WINS/WINSCW     Use X86 locked operations. Assume Pentium or above CPU (CMPXCHG8B available)
    X86             For Pentium and above use locked operations
                    For 486 use locked operations for 8, 16, 32 bit. For 64 bit must disable interrupts.
                    NOTE: 486 not supported at the moment
    ARMv4/ARMv5     Must disable interrupts.
    ARMv6           LDREX/STREX for 8, 16, 32 bit. For 64 bit must disable interrupts (maybe).
    ARMv6K/ARMv7    LDREXB/LDREXH/LDREX/LDREXD
*/

#ifdef __cplusplus
extern "C" {
#endif

IMPORT_C void __e32_memory_barrier();           /* Barrier guaranteeing ordering of memory accesses */
IMPORT_C void __e32_io_completion_barrier();    /* Barrier guaranteeing ordering and completion of memory accesses */
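
/*
    Example use of __e32_io_completion_barrier(): unlike __e32_memory_barrier(), which
    only orders accesses relative to each other, it waits for preceding accesses to
    complete. A minimal sketch for a memory-mapped peripheral (StartTransfer and
    KStartBit are hypothetical names, not part of any real driver API):

        void StartTransfer(volatile TUint32* aCtrlReg)
            {
            *aCtrlReg |= KStartBit;
            __e32_io_completion_barrier();
            }

    After the barrier the write has actually reached the device, so the caller can,
    for example, start a guard timer or allow a low-power transition.
*/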
|
/* Atomic operations on 8 bit quantities */
IMPORT_C TUint8 __e32_atomic_load_acq8(const volatile TAny* a);      /* read 8 bit acquire semantics */
IMPORT_C TUint8 __e32_atomic_store_rel8(volatile TAny* a, TUint8 v); /* write 8 bit, return v, release semantics */
IMPORT_C TUint8 __e32_atomic_store_ord8(volatile TAny* a, TUint8 v); /* write 8 bit, return v, full fence */
IMPORT_C TUint8 __e32_atomic_swp_rlx8(volatile TAny* a, TUint8 v);   /* write 8 bit, return original, relaxed */
IMPORT_C TUint8 __e32_atomic_swp_acq8(volatile TAny* a, TUint8 v);   /* write 8 bit, return original, acquire */
IMPORT_C TUint8 __e32_atomic_swp_rel8(volatile TAny* a, TUint8 v);   /* write 8 bit, return original, release */
IMPORT_C TUint8 __e32_atomic_swp_ord8(volatile TAny* a, TUint8 v);   /* write 8 bit, return original, full fence */
IMPORT_C TBool __e32_atomic_cas_rlx8(volatile TAny* a, TUint8* q, TUint8 v); /* if (*a==*q) {*a=v; return TRUE;} else {*q=*a; return FALSE;} */
IMPORT_C TBool __e32_atomic_cas_acq8(volatile TAny* a, TUint8* q, TUint8 v);
IMPORT_C TBool __e32_atomic_cas_rel8(volatile TAny* a, TUint8* q, TUint8 v);
IMPORT_C TBool __e32_atomic_cas_ord8(volatile TAny* a, TUint8* q, TUint8 v);
IMPORT_C TUint8 __e32_atomic_add_rlx8(volatile TAny* a, TUint8 v);   /* *a += v; return original *a; */
IMPORT_C TUint8 __e32_atomic_add_acq8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8 __e32_atomic_add_rel8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8 __e32_atomic_add_ord8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8 __e32_atomic_and_rlx8(volatile TAny* a, TUint8 v);   /* *a &= v; return original *a; */
IMPORT_C TUint8 __e32_atomic_and_acq8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8 __e32_atomic_and_rel8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8 __e32_atomic_and_ord8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8 __e32_atomic_ior_rlx8(volatile TAny* a, TUint8 v);   /* *a |= v; return original *a; */
IMPORT_C TUint8 __e32_atomic_ior_acq8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8 __e32_atomic_ior_rel8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8 __e32_atomic_ior_ord8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8 __e32_atomic_xor_rlx8(volatile TAny* a, TUint8 v);   /* *a ^= v; return original *a; */
IMPORT_C TUint8 __e32_atomic_xor_acq8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8 __e32_atomic_xor_rel8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8 __e32_atomic_xor_ord8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8 __e32_atomic_axo_rlx8(volatile TAny* a, TUint8 u, TUint8 v); /* *a = (*a & u) ^ v; return original *a; */
IMPORT_C TUint8 __e32_atomic_axo_acq8(volatile TAny* a, TUint8 u, TUint8 v);
IMPORT_C TUint8 __e32_atomic_axo_rel8(volatile TAny* a, TUint8 u, TUint8 v);
IMPORT_C TUint8 __e32_atomic_axo_ord8(volatile TAny* a, TUint8 u, TUint8 v);
IMPORT_C TUint8 __e32_atomic_tau_rlx8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TUint8 __e32_atomic_tau_acq8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v);
IMPORT_C TUint8 __e32_atomic_tau_rel8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v);
IMPORT_C TUint8 __e32_atomic_tau_ord8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v);
IMPORT_C TInt8 __e32_atomic_tas_rlx8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TInt8 __e32_atomic_tas_acq8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v);
IMPORT_C TInt8 __e32_atomic_tas_rel8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v);
IMPORT_C TInt8 __e32_atomic_tas_ord8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v);
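
/*
    Example of the swp family on an 8 bit flag: a one-shot initialisation guard.
    A minimal sketch (InitOnce, gInitFlag and DoInitialisation are hypothetical):

        static volatile TUint8 gInitFlag = 0;

        void InitOnce()
            {
            if (__e32_atomic_swp_ord8(&gInitFlag, 1) == 0)
                {
                DoInitialisation();
                }
            }

    Only the first caller observes the original value 0; later callers see 1 and
    skip the work. Note this sketch does not make later callers wait for the
    initialisation to finish.
*/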
|
/* Atomic operations on 16 bit quantities */
IMPORT_C TUint16 __e32_atomic_load_acq16(const volatile TAny* a);      /* read 16 bit acquire semantics */
IMPORT_C TUint16 __e32_atomic_store_rel16(volatile TAny* a, TUint16 v); /* write 16 bit, return v, release semantics */
IMPORT_C TUint16 __e32_atomic_store_ord16(volatile TAny* a, TUint16 v); /* write 16 bit, return v, full fence */
IMPORT_C TUint16 __e32_atomic_swp_rlx16(volatile TAny* a, TUint16 v);  /* write 16 bit, return original, relaxed */
IMPORT_C TUint16 __e32_atomic_swp_acq16(volatile TAny* a, TUint16 v);  /* write 16 bit, return original, acquire */
IMPORT_C TUint16 __e32_atomic_swp_rel16(volatile TAny* a, TUint16 v);  /* write 16 bit, return original, release */
IMPORT_C TUint16 __e32_atomic_swp_ord16(volatile TAny* a, TUint16 v);  /* write 16 bit, return original, full fence */
IMPORT_C TBool __e32_atomic_cas_rlx16(volatile TAny* a, TUint16* q, TUint16 v); /* if (*a==*q) {*a=v; return TRUE;} else {*q=*a; return FALSE;} */
IMPORT_C TBool __e32_atomic_cas_acq16(volatile TAny* a, TUint16* q, TUint16 v);
IMPORT_C TBool __e32_atomic_cas_rel16(volatile TAny* a, TUint16* q, TUint16 v);
IMPORT_C TBool __e32_atomic_cas_ord16(volatile TAny* a, TUint16* q, TUint16 v);
IMPORT_C TUint16 __e32_atomic_add_rlx16(volatile TAny* a, TUint16 v);  /* *a += v; return original *a; */
IMPORT_C TUint16 __e32_atomic_add_acq16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16 __e32_atomic_add_rel16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16 __e32_atomic_add_ord16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16 __e32_atomic_and_rlx16(volatile TAny* a, TUint16 v);  /* *a &= v; return original *a; */
IMPORT_C TUint16 __e32_atomic_and_acq16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16 __e32_atomic_and_rel16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16 __e32_atomic_and_ord16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16 __e32_atomic_ior_rlx16(volatile TAny* a, TUint16 v);  /* *a |= v; return original *a; */
IMPORT_C TUint16 __e32_atomic_ior_acq16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16 __e32_atomic_ior_rel16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16 __e32_atomic_ior_ord16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16 __e32_atomic_xor_rlx16(volatile TAny* a, TUint16 v);  /* *a ^= v; return original *a; */
IMPORT_C TUint16 __e32_atomic_xor_acq16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16 __e32_atomic_xor_rel16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16 __e32_atomic_xor_ord16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16 __e32_atomic_axo_rlx16(volatile TAny* a, TUint16 u, TUint16 v); /* *a = (*a & u) ^ v; return original *a; */
IMPORT_C TUint16 __e32_atomic_axo_acq16(volatile TAny* a, TUint16 u, TUint16 v);
IMPORT_C TUint16 __e32_atomic_axo_rel16(volatile TAny* a, TUint16 u, TUint16 v);
IMPORT_C TUint16 __e32_atomic_axo_ord16(volatile TAny* a, TUint16 u, TUint16 v);
IMPORT_C TUint16 __e32_atomic_tau_rlx16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TUint16 __e32_atomic_tau_acq16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v);
IMPORT_C TUint16 __e32_atomic_tau_rel16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v);
IMPORT_C TUint16 __e32_atomic_tau_ord16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v);
IMPORT_C TInt16 __e32_atomic_tas_rlx16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TInt16 __e32_atomic_tas_acq16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v);
IMPORT_C TInt16 __e32_atomic_tas_rel16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v);
IMPORT_C TInt16 __e32_atomic_tas_ord16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v);

/* Atomic operations on 32 bit quantities */
IMPORT_C TUint32 __e32_atomic_load_acq32(const volatile TAny* a);      /* read 32 bit acquire semantics */
IMPORT_C TUint32 __e32_atomic_store_rel32(volatile TAny* a, TUint32 v); /* write 32 bit, return v, release semantics */
IMPORT_C TUint32 __e32_atomic_store_ord32(volatile TAny* a, TUint32 v); /* write 32 bit, return v, full fence */
IMPORT_C TUint32 __e32_atomic_swp_rlx32(volatile TAny* a, TUint32 v);  /* write 32 bit, return original, relaxed */
IMPORT_C TUint32 __e32_atomic_swp_acq32(volatile TAny* a, TUint32 v);  /* write 32 bit, return original, acquire */
IMPORT_C TUint32 __e32_atomic_swp_rel32(volatile TAny* a, TUint32 v);  /* write 32 bit, return original, release */
IMPORT_C TUint32 __e32_atomic_swp_ord32(volatile TAny* a, TUint32 v);  /* write 32 bit, return original, full fence */
IMPORT_C TBool __e32_atomic_cas_rlx32(volatile TAny* a, TUint32* q, TUint32 v); /* if (*a==*q) {*a=v; return TRUE;} else {*q=*a; return FALSE;} */
IMPORT_C TBool __e32_atomic_cas_acq32(volatile TAny* a, TUint32* q, TUint32 v);
IMPORT_C TBool __e32_atomic_cas_rel32(volatile TAny* a, TUint32* q, TUint32 v);
IMPORT_C TBool __e32_atomic_cas_ord32(volatile TAny* a, TUint32* q, TUint32 v);
IMPORT_C TUint32 __e32_atomic_add_rlx32(volatile TAny* a, TUint32 v);  /* *a += v; return original *a; */
IMPORT_C TUint32 __e32_atomic_add_acq32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32 __e32_atomic_add_rel32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32 __e32_atomic_add_ord32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32 __e32_atomic_and_rlx32(volatile TAny* a, TUint32 v);  /* *a &= v; return original *a; */
IMPORT_C TUint32 __e32_atomic_and_acq32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32 __e32_atomic_and_rel32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32 __e32_atomic_and_ord32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32 __e32_atomic_ior_rlx32(volatile TAny* a, TUint32 v);  /* *a |= v; return original *a; */
IMPORT_C TUint32 __e32_atomic_ior_acq32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32 __e32_atomic_ior_rel32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32 __e32_atomic_ior_ord32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32 __e32_atomic_xor_rlx32(volatile TAny* a, TUint32 v);  /* *a ^= v; return original *a; */
IMPORT_C TUint32 __e32_atomic_xor_acq32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32 __e32_atomic_xor_rel32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32 __e32_atomic_xor_ord32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32 __e32_atomic_axo_rlx32(volatile TAny* a, TUint32 u, TUint32 v); /* *a = (*a & u) ^ v; return original *a; */
IMPORT_C TUint32 __e32_atomic_axo_acq32(volatile TAny* a, TUint32 u, TUint32 v);
IMPORT_C TUint32 __e32_atomic_axo_rel32(volatile TAny* a, TUint32 u, TUint32 v);
IMPORT_C TUint32 __e32_atomic_axo_ord32(volatile TAny* a, TUint32 u, TUint32 v);
IMPORT_C TUint32 __e32_atomic_tau_rlx32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TUint32 __e32_atomic_tau_acq32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v);
IMPORT_C TUint32 __e32_atomic_tau_rel32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v);
IMPORT_C TUint32 __e32_atomic_tau_ord32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v);
IMPORT_C TInt32 __e32_atomic_tas_rlx32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TInt32 __e32_atomic_tas_acq32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v);
IMPORT_C TInt32 __e32_atomic_tas_rel32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v);
IMPORT_C TInt32 __e32_atomic_tas_ord32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v);
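
/*
    Example of acquire/release pairing on a 32 bit word: a minimal spinlock sketch
    (TMiniLock, Lock and Unlock are hypothetical; no back-off or priority handling):

        struct TMiniLock
            {
            volatile TUint32 iHeld;
            };

        void Lock(TMiniLock& aLock)
            {
            while (__e32_atomic_swp_acq32(&aLock.iHeld, 1) != 0)
                {
                }
            }

        void Unlock(TMiniLock& aLock)
            {
            __e32_atomic_store_rel32(&aLock.iHeld, 0);
            }

    The acquire swap keeps accesses inside the critical section from being observed
    before the lock is taken; the release store keeps them from being observed after
    it is dropped.
*/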
|
/* Atomic operations on 64 bit quantities */
IMPORT_C TUint64 __e32_atomic_load_acq64(const volatile TAny* a);      /* read 64 bit acquire semantics */
IMPORT_C TUint64 __e32_atomic_store_rel64(volatile TAny* a, TUint64 v); /* write 64 bit, return v, release semantics */
IMPORT_C TUint64 __e32_atomic_store_ord64(volatile TAny* a, TUint64 v); /* write 64 bit, return v, full fence */
IMPORT_C TUint64 __e32_atomic_swp_rlx64(volatile TAny* a, TUint64 v);  /* write 64 bit, return original, relaxed */
IMPORT_C TUint64 __e32_atomic_swp_acq64(volatile TAny* a, TUint64 v);  /* write 64 bit, return original, acquire */
IMPORT_C TUint64 __e32_atomic_swp_rel64(volatile TAny* a, TUint64 v);  /* write 64 bit, return original, release */
IMPORT_C TUint64 __e32_atomic_swp_ord64(volatile TAny* a, TUint64 v);  /* write 64 bit, return original, full fence */
IMPORT_C TBool __e32_atomic_cas_rlx64(volatile TAny* a, TUint64* q, TUint64 v); /* if (*a==*q) {*a=v; return TRUE;} else {*q=*a; return FALSE;} */
IMPORT_C TBool __e32_atomic_cas_acq64(volatile TAny* a, TUint64* q, TUint64 v);
IMPORT_C TBool __e32_atomic_cas_rel64(volatile TAny* a, TUint64* q, TUint64 v);
IMPORT_C TBool __e32_atomic_cas_ord64(volatile TAny* a, TUint64* q, TUint64 v);
IMPORT_C TUint64 __e32_atomic_add_rlx64(volatile TAny* a, TUint64 v);  /* *a += v; return original *a; */
IMPORT_C TUint64 __e32_atomic_add_acq64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64 __e32_atomic_add_rel64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64 __e32_atomic_add_ord64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64 __e32_atomic_and_rlx64(volatile TAny* a, TUint64 v);  /* *a &= v; return original *a; */
IMPORT_C TUint64 __e32_atomic_and_acq64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64 __e32_atomic_and_rel64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64 __e32_atomic_and_ord64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64 __e32_atomic_ior_rlx64(volatile TAny* a, TUint64 v);  /* *a |= v; return original *a; */
IMPORT_C TUint64 __e32_atomic_ior_acq64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64 __e32_atomic_ior_rel64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64 __e32_atomic_ior_ord64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64 __e32_atomic_xor_rlx64(volatile TAny* a, TUint64 v);  /* *a ^= v; return original *a; */
IMPORT_C TUint64 __e32_atomic_xor_acq64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64 __e32_atomic_xor_rel64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64 __e32_atomic_xor_ord64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64 __e32_atomic_axo_rlx64(volatile TAny* a, TUint64 u, TUint64 v); /* *a = (*a & u) ^ v; return original *a; */
IMPORT_C TUint64 __e32_atomic_axo_acq64(volatile TAny* a, TUint64 u, TUint64 v);
IMPORT_C TUint64 __e32_atomic_axo_rel64(volatile TAny* a, TUint64 u, TUint64 v);
IMPORT_C TUint64 __e32_atomic_axo_ord64(volatile TAny* a, TUint64 u, TUint64 v);
IMPORT_C TUint64 __e32_atomic_tau_rlx64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TUint64 __e32_atomic_tau_acq64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v);
IMPORT_C TUint64 __e32_atomic_tau_rel64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v);
IMPORT_C TUint64 __e32_atomic_tau_ord64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v);
IMPORT_C TInt64 __e32_atomic_tas_rlx64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TInt64 __e32_atomic_tas_acq64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v);
IMPORT_C TInt64 __e32_atomic_tas_rel64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v);
IMPORT_C TInt64 __e32_atomic_tas_ord64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v);
|
/* Atomic operations on pointers
   These are implemented as macro definitions over the 32 or 64 bit operations
*/
/* IMPORT_C TAny* __e32_atomic_load_acq_ptr(const volatile TAny* a); */
#define __e32_atomic_load_acq_ptr(a)        ((TAny*)__e32_atomic_load_acq32(a))
/* IMPORT_C TAny* __e32_atomic_store_rel_ptr(volatile TAny* a, const volatile TAny* v); */
#define __e32_atomic_store_rel_ptr(a,v)     ((TAny*)__e32_atomic_store_rel32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_store_ord_ptr(volatile TAny* a, const volatile TAny* v); */
#define __e32_atomic_store_ord_ptr(a,v)     ((TAny*)__e32_atomic_store_ord32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_swp_rlx_ptr(volatile TAny* a, const volatile TAny* v); */
#define __e32_atomic_swp_rlx_ptr(a,v)       ((TAny*)__e32_atomic_swp_rlx32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_swp_acq_ptr(volatile TAny* a, const volatile TAny* v); */
#define __e32_atomic_swp_acq_ptr(a,v)       ((TAny*)__e32_atomic_swp_acq32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_swp_rel_ptr(volatile TAny* a, const volatile TAny* v); */
#define __e32_atomic_swp_rel_ptr(a,v)       ((TAny*)__e32_atomic_swp_rel32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_swp_ord_ptr(volatile TAny* a, const volatile TAny* v); */
#define __e32_atomic_swp_ord_ptr(a,v)       ((TAny*)__e32_atomic_swp_ord32(a,(T_UintPtr)(v)))
/* IMPORT_C TBool __e32_atomic_cas_rlx_ptr(volatile TAny* a, const volatile TAny** q, const volatile TAny* v); */
#define __e32_atomic_cas_rlx_ptr(a,q,v)     (__e32_atomic_cas_rlx32(a,(T_UintPtr*)(q),(T_UintPtr)(v)))
/* IMPORT_C TBool __e32_atomic_cas_acq_ptr(volatile TAny* a, const volatile TAny** q, const volatile TAny* v); */
#define __e32_atomic_cas_acq_ptr(a,q,v)     (__e32_atomic_cas_acq32(a,(T_UintPtr*)(q),(T_UintPtr)(v)))
/* IMPORT_C TBool __e32_atomic_cas_rel_ptr(volatile TAny* a, const volatile TAny** q, const volatile TAny* v); */
#define __e32_atomic_cas_rel_ptr(a,q,v)     (__e32_atomic_cas_rel32(a,(T_UintPtr*)(q),(T_UintPtr)(v)))
/* IMPORT_C TBool __e32_atomic_cas_ord_ptr(volatile TAny* a, const volatile TAny** q, const volatile TAny* v); */
#define __e32_atomic_cas_ord_ptr(a,q,v)     (__e32_atomic_cas_ord32(a,(T_UintPtr*)(q),(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_add_rlx_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_add_rlx_ptr(a,v)       ((TAny*)__e32_atomic_add_rlx32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_add_acq_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_add_acq_ptr(a,v)       ((TAny*)__e32_atomic_add_acq32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_add_rel_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_add_rel_ptr(a,v)       ((TAny*)__e32_atomic_add_rel32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_add_ord_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_add_ord_ptr(a,v)       ((TAny*)__e32_atomic_add_ord32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_and_rlx_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_and_rlx_ptr(a,v)       ((TAny*)__e32_atomic_and_rlx32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_and_acq_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_and_acq_ptr(a,v)       ((TAny*)__e32_atomic_and_acq32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_and_rel_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_and_rel_ptr(a,v)       ((TAny*)__e32_atomic_and_rel32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_and_ord_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_and_ord_ptr(a,v)       ((TAny*)__e32_atomic_and_ord32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_ior_rlx_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_ior_rlx_ptr(a,v)       ((TAny*)__e32_atomic_ior_rlx32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_ior_acq_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_ior_acq_ptr(a,v)       ((TAny*)__e32_atomic_ior_acq32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_ior_rel_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_ior_rel_ptr(a,v)       ((TAny*)__e32_atomic_ior_rel32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_ior_ord_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_ior_ord_ptr(a,v)       ((TAny*)__e32_atomic_ior_ord32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_xor_rlx_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_xor_rlx_ptr(a,v)       ((TAny*)__e32_atomic_xor_rlx32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_xor_acq_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_xor_acq_ptr(a,v)       ((TAny*)__e32_atomic_xor_acq32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_xor_rel_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_xor_rel_ptr(a,v)       ((TAny*)__e32_atomic_xor_rel32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_xor_ord_ptr(volatile TAny* a, T_UintPtr v); */
#define __e32_atomic_xor_ord_ptr(a,v)       ((TAny*)__e32_atomic_xor_ord32(a,(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_axo_rlx_ptr(volatile TAny* a, T_UintPtr u, T_UintPtr v); */
#define __e32_atomic_axo_rlx_ptr(a,u,v)     ((TAny*)__e32_atomic_axo_rlx32(a,(T_UintPtr)(u),(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_axo_acq_ptr(volatile TAny* a, T_UintPtr u, T_UintPtr v); */
#define __e32_atomic_axo_acq_ptr(a,u,v)     ((TAny*)__e32_atomic_axo_acq32(a,(T_UintPtr)(u),(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_axo_rel_ptr(volatile TAny* a, T_UintPtr u, T_UintPtr v); */
#define __e32_atomic_axo_rel_ptr(a,u,v)     ((TAny*)__e32_atomic_axo_rel32(a,(T_UintPtr)(u),(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_axo_ord_ptr(volatile TAny* a, T_UintPtr u, T_UintPtr v); */
#define __e32_atomic_axo_ord_ptr(a,u,v)     ((TAny*)__e32_atomic_axo_ord32(a,(T_UintPtr)(u),(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_tau_rlx_ptr(volatile TAny* a, const volatile TAny* t, T_UintPtr u, T_UintPtr v); */
#define __e32_atomic_tau_rlx_ptr(a,t,u,v)   ((TAny*)__e32_atomic_tau_rlx32(a,(T_UintPtr)(t),(T_UintPtr)(u),(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_tau_acq_ptr(volatile TAny* a, const volatile TAny* t, T_UintPtr u, T_UintPtr v); */
#define __e32_atomic_tau_acq_ptr(a,t,u,v)   ((TAny*)__e32_atomic_tau_acq32(a,(T_UintPtr)(t),(T_UintPtr)(u),(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_tau_rel_ptr(volatile TAny* a, const volatile TAny* t, T_UintPtr u, T_UintPtr v); */
#define __e32_atomic_tau_rel_ptr(a,t,u,v)   ((TAny*)__e32_atomic_tau_rel32(a,(T_UintPtr)(t),(T_UintPtr)(u),(T_UintPtr)(v)))
/* IMPORT_C TAny* __e32_atomic_tau_ord_ptr(volatile TAny* a, const volatile TAny* t, T_UintPtr u, T_UintPtr v); */
#define __e32_atomic_tau_ord_ptr(a,t,u,v)   ((TAny*)__e32_atomic_tau_ord32(a,(T_UintPtr)(t),(T_UintPtr)(u),(T_UintPtr)(v)))
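
/*
    Example of the pointer operations: pushing onto a lock-free singly linked stack
    with a cas retry loop. A minimal sketch (SStackItem, gTop and Push are
    hypothetical; a matching pop would need its own care, e.g. against ABA):

        struct SStackItem
            {
            SStackItem* iNext;
            };

        static SStackItem* volatile gTop = 0;

        void Push(SStackItem* aItem)
            {
            SStackItem* oldTop = (SStackItem*)__e32_atomic_load_acq_ptr(&gTop);
            do  {
                aItem->iNext = oldTop;
                } while (!__e32_atomic_cas_rel_ptr(&gTop, &oldTop, aItem));
            }

    On failure the cas updates oldTop with the top pointer actually seen, so the
    loop relinks aItem and retries. The release ordering makes the write to
    aItem->iNext observable before the new top pointer is published.
*/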
|
/* Miscellaneous utility functions
*/
IMPORT_C TInt __e32_find_ms1_32(TUint32 v);     /* return bit number of most significant 1, -1 if argument zero */
IMPORT_C TInt __e32_find_ls1_32(TUint32 v);     /* return bit number of least significant 1, -1 if argument zero */
IMPORT_C TInt __e32_bit_count_32(TUint32 v);    /* return number of bits with value 1 */
IMPORT_C TInt __e32_find_ms1_64(TUint64 v);     /* return bit number of most significant 1, -1 if argument zero */
IMPORT_C TInt __e32_find_ls1_64(TUint64 v);     /* return bit number of least significant 1, -1 if argument zero */
IMPORT_C TInt __e32_bit_count_64(TUint64 v);    /* return number of bits with value 1 */
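
/*
    Example use of the bit utilities: rounding a size up to a power of two.
    A minimal sketch (RoundUpToPowerOf2 is hypothetical; assumes
    0 < aSize <= 0x80000000 so the shift cannot overflow):

        TUint32 RoundUpToPowerOf2(TUint32 aSize)
            {
            if (__e32_bit_count_32(aSize) == 1)
                return aSize;
            return 1u << (__e32_find_ms1_32(aSize) + 1);
            }

    For instance __e32_find_ms1_32(0x1001) returns 12 and __e32_bit_count_32(0x1001)
    returns 2, so 0x1001 rounds up to 0x2000, while 0x1000 is returned unchanged.
    __e32_find_ls1_32() similarly gives the lowest set bit, e.g. the natural
    alignment of an address.
*/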
|
#ifdef __cplusplus
} /* extern "C" */
#endif


#endif /* __E32ATOMICS_H__ */