// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\x86\ncutilf.cia
//
//

#include <x86.h>
#include <apic.h>

#if defined(__VC32__)
#define __ASM_CALL(func) _asm call func
#elif defined(__GCC32__)
#define __ASM_CALL(func) asm("call _" #func);
#else
#error Unknown x86 compiler
#endif

#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
#define SPIN_LOCK_ENTRY_CHECK()         __ASM_CALL(spin_lock_entry_check)
#define SPIN_LOCK_MARK_ACQ()            __ASM_CALL(spin_lock_mark_acq)
#define SPIN_UNLOCK_ENTRY_CHECK()       __ASM_CALL(spin_unlock_entry_check)

#define RWSPIN_RLOCK_ENTRY_CHECK()      __ASM_CALL(rwspin_rlock_entry_check)
#define RWSPIN_RLOCK_MARK_ACQ()         __ASM_CALL(rwspin_rlock_mark_acq)
#define RWSPIN_RUNLOCK_ENTRY_CHECK()    __ASM_CALL(rwspin_runlock_entry_check)

#define RWSPIN_WLOCK_ENTRY_CHECK()      __ASM_CALL(rwspin_wlock_entry_check)
#define RWSPIN_WLOCK_MARK_ACQ()         __ASM_CALL(rwspin_wlock_mark_acq)
#define RWSPIN_WUNLOCK_ENTRY_CHECK()    __ASM_CALL(rwspin_wunlock_entry_check)

#else
#define SPIN_LOCK_ENTRY_CHECK()
#define SPIN_LOCK_MARK_ACQ()
#define SPIN_UNLOCK_ENTRY_CHECK()

#define RWSPIN_RLOCK_ENTRY_CHECK()
#define RWSPIN_RLOCK_MARK_ACQ()
#define RWSPIN_RUNLOCK_ENTRY_CHECK()

#define RWSPIN_WLOCK_ENTRY_CHECK()
#define RWSPIN_WLOCK_MARK_ACQ()
#define RWSPIN_WUNLOCK_ENTRY_CHECK()

#endif

/******************************************************************************
 * Timestamp
 ******************************************************************************/

/** Returns a timestamp value which is consistent across CPUs.

*/
EXPORT_C __NAKED__ TUint64 NKern::Timestamp()
    {
    asm("pushfd ");
    asm("cli ");        // stop thread migration between reading APIC ID and thread pointer
    asm("mov ecx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr ecx, 24 ");
    asm("mov ecx, [ecx*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("cmp ecx, 0 ");
    asm("jz short use_tsc_only ");
    asm("test cl, 3 ");
    asm("jnz short use_tsc_only ");
    asm("rdtsc ");
    asm("lea ecx, [ecx+%0]" : : "i" _FOFF(TSubScheduler, iSSX.iTimestampOffset));
    asm("add eax, [ecx] ");
    asm("adc edx, [ecx+4] ");
    asm("popfd ");
    asm("ret ");

    asm("use_tsc_only: ");
    asm("rdtsc ");
    asm("popfd ");
    asm("ret ");
    }

/** Get the current value of the CPU timestamp counter

*/
EXPORT_C __NAKED__ TUint64 X86::Timestamp()
    {
    asm("rdtsc ");
    asm("ret ");
    }
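
/*
 * Illustrative sketch only (not part of the build): NKern::Timestamp() above is
 * roughly equivalent to the following C-style pseudocode. ReadLocalApicId(),
 * DisableInterrupts() and RestoreInterrupts() are hypothetical helpers standing in
 * for the APIC read and the pushfd/cli/popfd sequence.
 *
 *  TUint64 Timestamp()
 *      {
 *      DisableInterrupts();                            // prevent migration mid-sequence
 *      TSubScheduler* ss = SubSchedulerLookupTable[ReadLocalApicId()];
 *      TUint64 tsc = __rdtsc();
 *      TUint64 r = tsc;                                // early boot: raw TSC
 *      if (ss && !(TLinAddr(ss) & 3))                  // subscheduler initialised?
 *          r = tsc + ss->iSSX.iTimestampOffset;        // per-CPU offset makes TSCs comparable
 *      RestoreInterrupts();
 *      return r;
 *      }
 */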


/******************************************************************************
 * Spin locks
 *
 * [this+0] in count (byte)
 * [this+1] out count (byte)
 * [this+6] order (byte)
 * [this+7] holding CPU (byte)
 ******************************************************************************/

#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
extern "C" __NAKED__ void spin_lock_entry_check()
    {
    /* ecx points to lock */
    asm("push eax ");
    asm("push ecx ");
    asm("push edx ");
    asm("pushfd ");
    asm("cli ");
    asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr edx, 24");
    asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
    asm("je short slec_ok ");
    asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
    asm("jnz short slec_ok ");
    asm("movzx ecx, word ptr [ecx+6] "); /* CL = order, CH = holding CPU */
    asm("cmp cl, 0x20 ");
    asm("jae short slec_preemption "); /* This lock requires preemption to be disabled */

    /* check interrupts disabled */
    asm("test dword ptr [esp], 0x200 "); /* Interrupts enabled? */
    asm("jz short slec_1 "); /* No - OK */
    asm("int 0xff "); /* Yes - die */

    asm("slec_preemption: ");
    asm("cmp cl, 0xff ");
    asm("je short slec_1 "); /* EOrderNone - don't check interrupts or preemption */
    asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iSSX.iIrqNestCount));
    asm("jge short slec_preemption_die "); /* If called from ISR, die */
    asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("jnz short slec_1 "); /* Preemption disabled - OK */
    asm("slec_preemption_die: ");
    asm("int 0xff "); /* Preemption enabled - die */

    asm("slec_1: ");
    asm("lea eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
    asm("cmp ch, [eax] ");
    asm("jnz short slec_2 "); /* Not already held by this CPU - OK */
    asm("int 0xff "); /* Already held by this CPU - die */

    asm("slec_2: ");
    asm("lea edx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
    asm("bsf eax, [edx] "); /* find LSB of low dword */
    asm("jnz short slec_3 "); /* skip if low dword nonzero */
    asm("bsf eax, [edx+4] "); /* else find LSB of high dword */
    asm("lea eax, [eax+32] "); /* add 32 to eax without changing flags */
    asm("jnz short slec_3 "); /* skip if high dword nonzero */
    asm("mov eax, 0x7f "); /* else set EAX = 0x7F */

    asm("slec_3: ");
    asm("cmp cl, al "); /* check order of this lock against lowest currently held order */
    asm("jl short slec_ok "); /* if this lock has lower order, OK - signed comparison so EOrderNone always works */
    asm("int 0xff "); /* ordering violation - die */

    asm("slec_ok: ");
    asm("popfd ");
    asm("pop edx ");
    asm("pop ecx ");
    asm("pop eax ");
    asm("ret ");
    }
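
/*
 * Illustrative sketch only (not part of the build): the order check above maintains,
 * per CPU, a 64-bit mask (TSubScheduler::iSpinLockOrderCheck) with one bit per held
 * lock order. A new lock may only be taken if its order is strictly lower than the
 * lowest order currently held. In C-style pseudocode, with LowestSetBit64() as a
 * hypothetical helper standing in for the BSF pair above:
 *
 *  TInt lowest = LowestSetBit64(ss->iSpinLockOrderCheck);  // 0x7F if no locks held
 *  if ((TInt8)newOrder >= (TInt8)lowest)                    // signed, so EOrderNone (0xFF) always passes
 *      Fault();                                             // ordering violation (the "int 0xff" above)
 */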

extern "C" __NAKED__ void spin_lock_mark_acq()
    {
    /* ecx points to lock */
    asm("push eax ");
    asm("push ecx ");
    asm("push edx ");
    asm("pushfd ");
    asm("cli ");
    asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr edx, 24");
    asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
    asm("je short slma_ok ");
    asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
    asm("jnz short slma_ok ");
    asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
    asm("mov [ecx+7], al "); /* set byte 7 to holding CPU number */
    asm("movzx ecx, byte ptr [ecx+6] "); /* CL = order */
    asm("cmp ecx, 0x40 ");
    asm("jae short slma_ok "); /* if EOrderNone, done */
    asm("bts [edx+%0], ecx" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));

    asm("slma_ok: ");
    asm("popfd ");
    asm("pop edx ");
    asm("pop ecx ");
    asm("pop eax ");
    asm("ret ");
    }

extern "C" __NAKED__ void spin_unlock_entry_check()
    {
    /* ecx points to lock */
    asm("push eax ");
    asm("push ecx ");
    asm("push edx ");
    asm("pushfd ");
    asm("cli ");
    asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr edx, 24");
    asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
    asm("je short suec_ok ");
    asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
    asm("jnz short suec_ok ");
    asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum)); /* eax = current CPU number */
    asm("shl eax, 8 "); /* AL = 0, AH = current CPU number */
    asm("xor ax, [ecx+6] "); /* AL = order, AH = holding CPU ^ current CPU number */
    asm("cmp al, 0x20 ");
    asm("jae short suec_preemption "); /* This lock requires preemption to be disabled */

    /* check interrupts disabled */
    asm("test dword ptr [esp], 0x200 "); /* Interrupts enabled? */
    asm("jz short suec_1 "); /* No - OK */
    asm("int 0xff "); /* Yes - die */

    asm("suec_preemption: ");
    asm("cmp al, 0xff ");
    asm("je short suec_1 "); /* EOrderNone - don't check interrupts or preemption */
    asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("jnz short suec_1 "); /* Preemption disabled - OK */
    asm("int 0xff "); /* Preemption enabled - die */

    asm("suec_1: ");
    asm("cmp ah, 0 "); /* Check if holding CPU ^ current CPU number == 0 */
    asm("jz short suec_2 "); /* Already held by this CPU - OK */
    asm("int 0xff "); /* We don't hold lock - die */

    asm("suec_2: ");
    asm("mov byte ptr [ecx+7], 0xff "); /* reset holding CPU */
    asm("cmp eax, 0x40 "); /* EAX = lock order */
    asm("jae short suec_ok "); /* if EOrderNone, done */
    asm("btr [edx+%0], eax" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
    asm("jc short suec_ok "); /* bit should have been set originally */
    asm("int 0xff "); /* if not, die - something must have got corrupted */

    asm("suec_ok: ");
    asm("popfd ");
    asm("pop edx ");
    asm("pop ecx ");
    asm("pop eax ");
    asm("ret ");
    }
#endif


/******************************************************************************
 * Plain old spin lock
 *
 * Fundamental algorithm:
 * lock() { old_in = in++; while(out!=old_in) __chill(); }
 * unlock() { ++out; }
 *
 * [this+0] in count (byte)
 * [this+1] out count (byte)
 * [this+6] order value
 * [this+7] holding CPU number, 0xFF if none
 *
 ******************************************************************************/
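
/*
 * Illustrative sketch only (not part of the build): a plain C rendering of the
 * byte-wide ticket lock described above, assuming hypothetical single-byte atomic
 * fetch-and-increment and increment helpers:
 *
 *  struct TTicketLock { volatile TUint8 iIn; volatile TUint8 iOut; };
 *
 *  void Lock(TTicketLock* l)
 *      {
 *      TUint8 myTicket = AtomicFetchInc8(&l->iIn);     // take next ticket (lock xadd)
 *      while (l->iOut != myTicket)
 *          X86_PAUSE;                                  // spin until our ticket is served
 *      }
 *
 *  void Unlock(TTicketLock* l)
 *      {
 *      AtomicInc8(&l->iOut);                           // serve next waiter (lock inc)
 *      }
 */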

__NAKED__ EXPORT_C void TSpinLock::LockIrq()
    {
    THISCALL_PROLOG0()
    asm("cli ");
    SPIN_LOCK_ENTRY_CHECK()
    asm("mov al, 1 ");
    asm("lock xadd [ecx], al "); /* al = in++ */
    asm("sl_lockirq_loop: ");
    asm("cmp al, [ecx+1] "); /* compare al to out */
    asm("jnz short sl_lockirq_loop2 ");
    SPIN_LOCK_MARK_ACQ()
    asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
    THISCALL_EPILOG0()

    asm("sl_lockirq_loop2: ");
    X86_PAUSE
    asm("jmp short sl_lockirq_loop ");
    }

__NAKED__ EXPORT_C void TSpinLock::UnlockIrq()
    {
    THISCALL_PROLOG0()
    SPIN_UNLOCK_ENTRY_CHECK()
    asm("lock inc byte ptr [ecx+1] "); /* ++out */
    asm("sti ");
    THISCALL_EPILOG0()
    }

extern "C" TBool __fastcall spin_lock_flash_irq(TSpinLock* a)
    {
    a->UnlockIrq();
    a->LockIrq();
    return TRUE;
    }

__NAKED__ EXPORT_C TBool TSpinLock::FlashIrq()
    {
    THISCALL_PROLOG0()
    asm("mov ax, [ecx] ");
    asm("inc ah ");
    asm("xor al, ah ");
    asm("and eax, 0xff ");
    asm("jne %a0" : : "i" (&spin_lock_flash_irq));
    THISCALL_EPILOG0()
    }
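
/*
 * Illustrative note (not part of the build): the "flash" variants briefly release and
 * re-acquire the lock only when another CPU has queued behind us. With AL = in count
 * and AH = out count, the test above is roughly:
 *
 *  if (in != (TUint8)(out + 1))    // someone else has taken a ticket
 *      { UnlockIrq(); LockIrq(); } // let them in (tail-call to the C helper above)
 */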

__NAKED__ EXPORT_C void TSpinLock::LockOnly()
    {
    THISCALL_PROLOG0()
    SPIN_LOCK_ENTRY_CHECK()
    asm("mov al, 1 ");
    asm("lock xadd [ecx], al "); /* al = in++ */
    asm("sl_lockonly_loop: ");
    asm("cmp al, [ecx+1] "); /* compare al to out */
    asm("jnz short sl_lockonly_loop2 ");
    SPIN_LOCK_MARK_ACQ()
    asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
    THISCALL_EPILOG0()

    asm("sl_lockonly_loop2: ");
    X86_PAUSE
    asm("jmp short sl_lockonly_loop ");
    }

__NAKED__ EXPORT_C void TSpinLock::UnlockOnly()
    {
    THISCALL_PROLOG0()
    SPIN_UNLOCK_ENTRY_CHECK()
    asm("lock inc byte ptr [ecx+1] "); /* ++out */
    THISCALL_EPILOG0()
    }

extern "C" TBool __fastcall spin_lock_flash_only(TSpinLock* a)
    {
    a->UnlockOnly();
    a->LockOnly();
    return TRUE;
    }

__NAKED__ EXPORT_C TBool TSpinLock::FlashOnly()
    {
    THISCALL_PROLOG0()
    asm("mov ax, [ecx] ");
    asm("inc ah ");
    asm("xor al, ah ");
    asm("and eax, 0xff ");
    asm("jne %a0" : : "i" (&spin_lock_flash_only));
    THISCALL_EPILOG0()
    }

__NAKED__ EXPORT_C TInt TSpinLock::LockIrqSave()
    {
    THISCALL_PROLOG0()
    asm("pushfd ");
    asm("cli ");
    SPIN_LOCK_ENTRY_CHECK()
    asm("mov al, 1 ");
    asm("lock xadd [ecx], al "); /* al = in++ */
    asm("sl_lockirqs_loop: ");
    asm("cmp al, [ecx+1] "); /* compare al to out */
    asm("jnz short sl_lockirqs_loop2 ");
    SPIN_LOCK_MARK_ACQ()
    asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
    asm("pop eax "); /* retrieve saved EFLAGS */
    asm("and eax, 0x200 "); /* return just interrupt mask bit */
    THISCALL_EPILOG0()

    asm("sl_lockirqs_loop2: ");
    X86_PAUSE
    asm("jmp short sl_lockirqs_loop ");
    }

__NAKED__ EXPORT_C void TSpinLock::UnlockIrqRestore(TInt)
    {
    THISCALL_PROLOG1()
    SPIN_UNLOCK_ENTRY_CHECK()
    asm("lock inc byte ptr [ecx+1] "); /* ++out */
    asm("test dword ptr [esp+4], 0x200 ");
    asm("jz short sl_unlockirqr_1 ");
    asm("sti ");
    asm("sl_unlockirqr_1: ");
    THISCALL_EPILOG1()
    }
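
/*
 * Illustrative usage (not part of the build): LockIrqSave() returns the caller's
 * EFLAGS interrupt bit so that nested sections restore the correct state:
 *
 *  TInt irq = aLock.LockIrqSave();     // disable interrupts, remember previous state
 *  // ... critical section ...
 *  aLock.UnlockIrqRestore(irq);        // re-enable interrupts only if they were enabled before
 */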

__NAKED__ EXPORT_C TBool TSpinLock::FlashIrqRestore(TInt)
    {
    /* don't mess with stacked args, yet */
    THISCALL_PROLOG0()
    asm("mov ax, [ecx] ");
    asm("inc ah ");
    asm("xor al, ah ");
    asm("and eax, 0xff ");
    asm("jne short sl_flashirqr_1 ");

    /* now we can remove stacked arg since we don't need it */
    THISCALL_EPILOG1()

    asm("sl_flashirqr_1: ");
    THISCALL_PROLOG1()
    asm("test dword ptr [esp+4], 0x200 ");
    asm("jnz short sl_flashirqr_2 ");
    asm("call %a0" : : "i" (&spin_lock_flash_only));
    asm("jmp short sl_flashirqr_3 ");
    asm("sl_flashirqr_2: ");
    asm("call %a0" : : "i" (&spin_lock_flash_irq));
    asm("sl_flashirqr_3: ");
    THISCALL_EPILOG1()
    }

extern "C" TBool __fastcall spin_lock_flash_preempt(TSpinLock* a)
    {
    a->UnlockOnly();
    NKern::PreemptionPoint();
    a->LockOnly();
    return TRUE;
    }

__NAKED__ EXPORT_C TBool TSpinLock::FlashPreempt()
    {
    THISCALL_PROLOG0()
    asm("mov ax, [ecx] ");
    asm("inc ah ");
    asm("xor al, ah ");
    asm("and eax, 0xff ");
    asm("jne %a0" : : "i" (&spin_lock_flash_preempt));
    THISCALL_EPILOG0()
    }


/******************************************************************************
 * Read/Write Spin lock
 *
 * Structure ( (in.r,in.w) , (out.r,out.w) )
 * Fundamental algorithm:
 * lockr() { old_in = (in.r++,in.w); while(out.w!=old_in.w) __chill(); }
 * unlockr() { ++out.r; }
 * lockw() { old_in = (in.r,in.w++); while(out!=old_in) __chill(); }
 * unlockw() { ++out.w; }
 *
 * [this+0] in.w
 * [this+1] in.r
 * [this+2] out.w
 * [this+3] out.r
 * [this+4] Bit mask of CPUs which hold read locks
 * [this+6] order value
 * [this+7] CPU number which holds write lock, 0xFF if none
 *
 ******************************************************************************/
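
/*
 * Illustrative sketch only (not part of the build): a C-style rendering of the
 * read/write ticket scheme above, with hypothetical byte-wide atomic helpers:
 *
 *  struct TRWTicket { volatile TUint8 iInW, iInR, iOutW, iOutR; };
 *
 *  void LockR(TRWTicket* l)
 *      {
 *      TUint8 ticketW = AtomicFetchIncR(l);        // atomically (in.r++, snapshot in.w)
 *      while (l->iOutW != ticketW)                 // wait for all earlier writers
 *          X86_PAUSE;
 *      }
 *  void UnlockR(TRWTicket* l)  { AtomicIncOutR(l); }   // ++out.r
 *
 *  void LockW(TRWTicket* l)
 *      {
 *      TUint16 ticket = AtomicFetchIncW(l);        // atomically (snapshot in.r, in.w++)
 *      while (PackedOut(l) != ticket)              // wait for earlier readers AND writers
 *          X86_PAUSE;
 *      }
 *  void UnlockW(TRWTicket* l)  { AtomicIncOutW(l); }   // ++out.w
 */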

#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
extern "C" __NAKED__ void rwspin_rlock_entry_check()
    {
    /* ecx points to lock */
    asm("push eax ");
    asm("push ecx ");
    asm("push edx ");
    asm("pushfd ");
    asm("cli ");
    asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr edx, 24");
    asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
    asm("je short rwrlec_ok ");
    asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
    asm("jnz short rwrlec_ok ");
    asm("movzx ecx, word ptr [ecx+6] "); /* CL = order, CH = holding CPU for write lock */
    asm("cmp cl, 0x20 ");
    asm("jae short rwrlec_preemption "); /* This lock requires preemption to be disabled */

    /* check interrupts disabled */
    asm("test dword ptr [esp], 0x200 "); /* Interrupts enabled? */
    asm("jz short rwrlec_1 "); /* No - OK */
    asm("int 0xff "); /* Yes - die */

    asm("rwrlec_preemption: ");
    asm("cmp cl, 0xff ");
    asm("je short rwrlec_1 "); /* EOrderNone - don't check interrupts or preemption */
    asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iSSX.iIrqNestCount));
    asm("jge short rwrlec_preemption_die "); /* If called from ISR, die */
    asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("jnz short rwrlec_1 "); /* Preemption disabled - OK */
    asm("rwrlec_preemption_die: ");
    asm("int 0xff "); /* Preemption enabled - die */

    asm("rwrlec_1: ");
    asm("lea eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
    asm("cmp ch, [eax] ");
    asm("jnz short rwrlec_2 "); /* Not already held by this CPU for write - OK */
    asm("int 0xff "); /* Already held by this CPU for write - die */

    asm("rwrlec_2: ");
    asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
    asm("test al, [ecx+4] "); /* Test if already held by this CPU for read */
    asm("jz short rwrlec_3 ");
    asm("int 0xff "); /* if so, die */

    asm("rwrlec_3: ");
    asm("lea edx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
    asm("bsf eax, [edx] "); /* find LSB of low dword */
asm("jnz short rwrlec_3 "); /* skip if low dword nonzero */ |
    asm("bsf eax, [edx+4] "); /* else find LSB of high dword */
    asm("lea eax, [eax+32] "); /* add 32 to eax without changing flags */
    asm("jnz short rwrlec_4 "); /* skip if high dword nonzero */
    asm("mov eax, 0x7f "); /* else set EAX = 0x7F */

    asm("rwrlec_4: ");
    asm("cmp cl, al "); /* check order of this lock against lowest currently held order */
    asm("jl short rwrlec_ok "); /* if this lock has lower order, OK - signed comparison so EOrderNone always works */
    asm("int 0xff "); /* ordering violation - die */

    asm("rwrlec_ok: ");
    asm("popfd ");
    asm("pop edx ");
    asm("pop ecx ");
    asm("pop eax ");
    asm("ret ");
    }

extern "C" __NAKED__ void rwspin_rlock_mark_acq()
    {
    /* ecx points to lock */
    asm("push eax ");
    asm("push ecx ");
    asm("push edx ");
    asm("pushfd ");
    asm("cli ");
    asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr edx, 24");
    asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
    asm("je short rwrlma_ok ");
    asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
    asm("jnz short rwrlma_ok ");
    asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
    asm("lock or [ecx+4], al "); /* set bit in byte 4 corresponding to this CPU */
    asm("movzx ecx, byte ptr [ecx+6] "); /* CL = order */
    asm("cmp ecx, 0x40 ");
    asm("jae short rwrlma_ok "); /* if EOrderNone, done */
    asm("bts [edx+%0], ecx" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));

    asm("rwrlma_ok: ");
    asm("popfd ");
    asm("pop edx ");
    asm("pop ecx ");
    asm("pop eax ");
    asm("ret ");
    }

extern "C" __NAKED__ void rwspin_runlock_entry_check()
    {
    /* ecx points to lock */
    asm("push eax ");
    asm("push ebx ");
    asm("push ecx ");
    asm("push edx ");
    asm("pushfd ");
    asm("cli ");
    asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr edx, 24");
    asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
    asm("je short rwruec_ok ");
    asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
    asm("jnz short rwruec_ok ");
    asm("mov eax, [ecx+4] "); /* AL = R-mask, EAX byte 2 = order */
    asm("and eax, 0x00ffffff "); /* mask out W CPU */
    asm("cmp eax, 0x00200000 ");
    asm("jae short rwruec_preemption "); /* This lock requires preemption to be disabled */

    /* check interrupts disabled */
    asm("test dword ptr [esp], 0x200 "); /* Interrupts enabled? */
    asm("jz short rwruec_1 "); /* No - OK */
    asm("int 0xff "); /* Yes - die */

    asm("rwruec_preemption: ");
    asm("cmp eax, 0x00ff0000 ");
    asm("jae short rwruec_1 "); /* EOrderNone - don't check interrupts or preemption */
    asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("jnz short rwruec_1 "); /* Preemption disabled - OK */
    asm("int 0xff "); /* Preemption enabled - die */

    asm("rwruec_1: ");
    asm("mov ebx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
    asm("test al, bl "); /* Check if current CPU holds read lock */
    asm("jnz short rwruec_2 "); /* Already held by this CPU - OK */
    asm("int 0xff "); /* We don't hold lock - die */

    asm("rwruec_2: ");
    asm("not bl ");
    asm("lock and [ecx+4], bl "); /* clear bit in R-holding CPU mask */
    asm("shr eax, 16 "); /* EAX = lock order */
    asm("cmp eax, 0x40 ");
    asm("jae short rwruec_ok "); /* if EOrderNone, done */
    asm("btr [edx+%0], eax" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
    asm("jc short rwruec_ok "); /* bit should have been set originally */
    asm("int 0xff "); /* if not, die - something must have got corrupted */

    asm("rwruec_ok: ");
    asm("popfd ");
    asm("pop edx ");
    asm("pop ecx ");
    asm("pop ebx ");
    asm("pop eax ");
    asm("ret ");
    }

extern "C" __NAKED__ void rwspin_wlock_entry_check()
    {
    /* ecx points to lock */
    asm("push eax ");
    asm("push ecx ");
    asm("push edx ");
    asm("pushfd ");
    asm("cli ");
    asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr edx, 24");
    asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
    asm("je short rwwlec_ok ");
    asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
    asm("jnz short rwwlec_ok ");
    asm("movzx ecx, word ptr [ecx+6] "); /* CL = order, CH = write lock holding CPU */
    asm("cmp cl, 0x20 ");
    asm("jae short rwwlec_preemption "); /* This lock requires preemption to be disabled */

    /* check interrupts disabled */
    asm("test dword ptr [esp], 0x200 "); /* Interrupts enabled? */
    asm("jz short rwwlec_1 "); /* No - OK */
    asm("int 0xff "); /* Yes - die */

    asm("rwwlec_preemption: ");
    asm("cmp cl, 0xff ");
    asm("je short rwwlec_1 "); /* EOrderNone - don't check interrupts or preemption */
    asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iSSX.iIrqNestCount));
    asm("jge short rwwlec_preemption_die "); /* If called from ISR, die */
    asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("jnz short rwwlec_1 "); /* Preemption disabled - OK */
    asm("rwwlec_preemption_die: ");
    asm("int 0xff "); /* Preemption enabled - die */

    asm("rwwlec_1: ");
    asm("lea eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
    asm("cmp ch, [eax] ");
    asm("jnz short rwwlec_2 "); /* Not already held by this CPU for write - OK */
    asm("int 0xff "); /* Already held by this CPU for write - die */

    asm("rwwlec_2: ");
    asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
    asm("test al, [ecx+4] "); /* Test if already held by this CPU for read */
    asm("jz short rwwlec_3 ");
    asm("int 0xff "); /* if so, die */

    asm("rwwlec_3: ");
    asm("lea edx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
    asm("bsf eax, [edx] "); /* find LSB of low dword */
    asm("jnz short rwwlec_4 "); /* skip if low dword nonzero */
    asm("bsf eax, [edx+4] "); /* else find LSB of high dword */
    asm("lea eax, [eax+32] "); /* add 32 to eax without changing flags */
    asm("jnz short rwwlec_4 "); /* skip if high dword nonzero */
    asm("mov eax, 0x7f "); /* else set EAX = 0x7F */

    asm("rwwlec_4: ");
    asm("cmp cl, al "); /* check order of this lock against lowest currently held order */
    asm("jl short rwwlec_ok "); /* if this lock has lower order, OK - signed comparison so EOrderNone always works */
    asm("int 0xff "); /* ordering violation - die */

    asm("rwwlec_ok: ");
    asm("popfd ");
    asm("pop edx ");
    asm("pop ecx ");
    asm("pop eax ");
    asm("ret ");
    }

extern "C" __NAKED__ void rwspin_wlock_mark_acq()
    {
    /* ecx points to lock */
    asm("push eax ");
    asm("push ecx ");
    asm("push edx ");
    asm("pushfd ");
    asm("cli ");
    asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr edx, 24");
    asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
    asm("je short rwwlma_ok ");
    asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
    asm("jnz short rwwlma_ok ");
    asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
    asm("mov [ecx+7], al "); /* set byte 7 to holding CPU number */
    asm("movzx ecx, byte ptr [ecx+6] "); /* CL = order */
    asm("cmp ecx, 0x40 ");
    asm("jae short rwwlma_ok "); /* if EOrderNone, done */
    asm("bts [edx+%0], ecx" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));

    asm("rwwlma_ok: ");
    asm("popfd ");
    asm("pop edx ");
    asm("pop ecx ");
    asm("pop eax ");
    asm("ret ");
    }

extern "C" __NAKED__ void rwspin_wunlock_entry_check()
    {
    /* ecx points to lock */
    asm("push eax ");
    asm("push ecx ");
    asm("push edx ");
    asm("pushfd ");
    asm("cli ");
    asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr edx, 24");
    asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
    asm("je short rwwuec_ok ");
    asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
    asm("jnz short rwwuec_ok ");
    asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum)); /* eax = current CPU number */
    asm("shl eax, 8 "); /* AL = 0, AH = current CPU number */
    asm("xor ax, [ecx+6] "); /* AL = order, AH = holding CPU ^ current CPU number */
    asm("cmp al, 0x20 ");
    asm("jae short rwwuec_preemption "); /* This lock requires preemption to be disabled */

    /* check interrupts disabled */
    asm("test dword ptr [esp], 0x200 "); /* Interrupts enabled? */
    asm("jz short rwwuec_1 "); /* No - OK */
    asm("int 0xff "); /* Yes - die */

    asm("rwwuec_preemption: ");
    asm("cmp al, 0xff ");
    asm("je short rwwuec_1 "); /* EOrderNone - don't check interrupts or preemption */
    asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("jnz short rwwuec_1 "); /* Preemption disabled - OK */
    asm("int 0xff "); /* Preemption enabled - die */

    asm("rwwuec_1: ");
    asm("cmp ah, 0 "); /* Check if holding CPU ^ current CPU number == 0 */
    asm("jz short rwwuec_2 "); /* Already held by this CPU - OK */
    asm("int 0xff "); /* We don't hold lock - die */

    asm("rwwuec_2: ");
    asm("mov byte ptr [ecx+7], 0xff "); /* reset holding CPU */
    asm("cmp eax, 0x40 "); /* EAX = lock order */
    asm("jae short rwwuec_ok "); /* if EOrderNone, done */
    asm("btr [edx+%0], eax" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
    asm("jc short rwwuec_ok "); /* bit should have been set originally */
    asm("int 0xff "); /* if not, die - something must have got corrupted */

    asm("rwwuec_ok: ");
    asm("popfd ");
    asm("pop edx ");
    asm("pop ecx ");
    asm("pop eax ");
    asm("ret ");
    }
#endif


/*-----------------------------------------------------------------------------
 - Read locks disabling IRQ
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockIrqR()
    {
    THISCALL_PROLOG0()
    asm("cli ");
    RWSPIN_RLOCK_ENTRY_CHECK()
    asm("mov ax, 0x100 ");
    asm("lock xadd [ecx], ax "); /* ah = in.r++, al = in.w */
    asm("rwl_rlockirq_loop: ");
    asm("cmp al, [ecx+2] "); /* compare al to out.w */
    asm("jnz short rwl_rlockirq_loop2 ");
    RWSPIN_RLOCK_MARK_ACQ()
    asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
    THISCALL_EPILOG0()

    asm("rwl_rlockirq_loop2: ");
    X86_PAUSE
    asm("jmp short rwl_rlockirq_loop ");
    }

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqR()
    {
    THISCALL_PROLOG0()
    RWSPIN_RUNLOCK_ENTRY_CHECK()
    asm("lock add word ptr [ecx+2], 0x100 "); /* ++out.r */
    asm("sti ");
    THISCALL_EPILOG0()
    }

extern "C" TBool __fastcall rwspin_rlock_flash_irq(TRWSpinLock* a)
    {
    a->UnlockIrqR();
    a->LockIrqR();
    return TRUE;
    }

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqR()
    {
    THISCALL_PROLOG0()
    asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
    asm("mov edx, eax ");
    asm("shr edx, 16 "); /* dl=out.w */
    asm("xor eax, edx "); /* al = in.w ^ out.w = 0 if no writers waiting */
    asm("and eax, 0xff ");
    asm("jne %a0" : : "i" (&rwspin_rlock_flash_irq));
    THISCALL_EPILOG0()
    }


/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockIrqW()
    {
    THISCALL_PROLOG0()
    asm("cli ");
    RWSPIN_WLOCK_ENTRY_CHECK()
    asm("mov ax, [ecx] "); /* ah = in.r, al = in.w */
    asm("rwl_wlockirq_loop3: ");
    asm("mov edx, eax ");
    asm("inc dl "); /* dh = in.r, dl = in.w+1 */
    asm("lock cmpxchg [ecx], dx "); /* attempt to update in.w */
    asm("jne short rwl_wlockirq_loop3 "); /* loop if failed */
    asm("rwl_wlockirq_loop: ");
    asm("cmp ax, [ecx+2] "); /* compare ax to (out.w,out.r) */
    asm("jnz short rwl_wlockirq_loop2 ");
    RWSPIN_WLOCK_MARK_ACQ()
    asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
    THISCALL_EPILOG0()

    asm("rwl_wlockirq_loop2: ");
    X86_PAUSE
    asm("jmp short rwl_wlockirq_loop ");
    }

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqW()
    {
    THISCALL_PROLOG0()
    RWSPIN_WUNLOCK_ENTRY_CHECK()
    asm("mov ax, [ecx+2] "); /* ah = out.r, al = out.w */
    asm("rwl_wunlockirq_loop: ");
    asm("mov edx, eax ");
    asm("inc dl "); /* dh = out.r, dl = out.w+1 */
    asm("lock cmpxchg [ecx+2], dx "); /* attempt to update out.w */
    asm("jne short rwl_wunlockirq_loop "); /* loop if failed */
    asm("sti ");
    THISCALL_EPILOG0()
    }

extern "C" TBool __fastcall rwspin_wlock_flash_irq(TRWSpinLock* a)
    {
    a->UnlockIrqW();
    a->LockIrqW();
    return TRUE;
    }

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqW()
    {
    THISCALL_PROLOG0()
    asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
    asm("mov edx, eax ");
    asm("shr edx, 16 "); /* dl=out.w, dh=out.r */
    asm("inc dl "); /* dx==ax now means no-one else is waiting for lock */
    asm("xor eax, edx ");
    asm("and eax, 0xffff ");
    asm("jne %a0" : : "i" (&rwspin_wlock_flash_irq));
    THISCALL_EPILOG0()
    }
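
/*
 * Illustrative note (not part of the build): the read/write flash tests above check
 * for queued contenders before briefly releasing the lock, roughly:
 *
 *  read flash:   if (in.w != out.w)                        // a writer is waiting
 *                    { UnlockR(); LockR(); }
 *  write flash:  if ((in.r,in.w) != (out.r, out.w + 1))    // any reader or writer is waiting
 *                    { UnlockW(); LockW(); }
 */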


/*-----------------------------------------------------------------------------
 - Read locks leaving IRQ alone
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockOnlyR()
    {
    THISCALL_PROLOG0()
    RWSPIN_RLOCK_ENTRY_CHECK()
    asm("mov ax, 0x100 ");
    asm("lock xadd [ecx], ax "); /* ah = in.r++, al = in.w */
    asm("rwl_rlockonly_loop: ");
    asm("cmp al, [ecx+2] "); /* compare al to out.w */
    asm("jnz short rwl_rlockonly_loop2 ");
    RWSPIN_RLOCK_MARK_ACQ()
    asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
    THISCALL_EPILOG0()

    asm("rwl_rlockonly_loop2: ");
    X86_PAUSE
    asm("jmp short rwl_rlockonly_loop ");
    }

__NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyR()
    {
    THISCALL_PROLOG0()
    RWSPIN_RUNLOCK_ENTRY_CHECK()
    asm("lock add word ptr [ecx+2], 0x100 "); /* ++out.r */
    THISCALL_EPILOG0()
    }

extern "C" TBool __fastcall rwspin_rlock_flash_only(TRWSpinLock* a)
    {
    a->UnlockOnlyR();
    a->LockOnlyR();
    return TRUE;
    }

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyR()
    {
    THISCALL_PROLOG0()
    asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
    asm("mov edx, eax ");
    asm("shr edx, 16 "); /* dl=out.w */
    asm("xor eax, edx "); /* al = in.w ^ out.w = 0 if no writers waiting */
    asm("and eax, 0xff ");
    asm("jne %a0" : : "i" (&rwspin_rlock_flash_only));
    THISCALL_EPILOG0()
    }


/*-----------------------------------------------------------------------------
 - Write locks leaving IRQ alone
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockOnlyW()
    {
    THISCALL_PROLOG0()
    RWSPIN_WLOCK_ENTRY_CHECK()
    asm("mov ax, [ecx] "); /* ah = in.r, al = in.w */
    asm("rwl_wlockonly_loop3: ");
    asm("mov edx, eax ");
    asm("inc dl "); /* dh = in.r, dl = in.w+1 */
    asm("lock cmpxchg [ecx], dx "); /* attempt to update in.w */
    asm("jne short rwl_wlockonly_loop3 "); /* loop if failed */
    asm("rwl_wlockonly_loop: ");
    asm("cmp ax, [ecx+2] "); /* compare ax to (out.w,out.r) */
    asm("jnz short rwl_wlockonly_loop2 ");
    RWSPIN_WLOCK_MARK_ACQ()
    asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
    THISCALL_EPILOG0()

    asm("rwl_wlockonly_loop2: ");
    X86_PAUSE
    asm("jmp short rwl_wlockonly_loop ");
    }

__NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyW()
    {
    THISCALL_PROLOG0()
    RWSPIN_WUNLOCK_ENTRY_CHECK()
    asm("mov ax, [ecx+2] "); /* ah = out.r, al = out.w */
    asm("rwl_wunlockonly_loop: ");
    asm("mov edx, eax ");
    asm("inc dl "); /* dh = out.r, dl = out.w+1 */
    asm("lock cmpxchg [ecx+2], dx "); /* attempt to update out.w */
    asm("jne short rwl_wunlockonly_loop "); /* loop if failed */
    THISCALL_EPILOG0()
    }

extern "C" TBool __fastcall rwspin_wlock_flash_only(TRWSpinLock* a)
    {
    a->UnlockOnlyW();
    a->LockOnlyW();
    return TRUE;
    }

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyW()
    {
    THISCALL_PROLOG0()
    asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
    asm("mov edx, eax ");
    asm("shr edx, 16 "); /* dl=out.w, dh=out.r */
    asm("inc dl "); /* dx==ax now means no-one else is waiting for lock */
    asm("xor eax, edx ");
    asm("and eax, 0xffff ");
    asm("jne %a0" : : "i" (&rwspin_wlock_flash_only));
    THISCALL_EPILOG0()
    }


/*-----------------------------------------------------------------------------
 - Read locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveR()
    {
    THISCALL_PROLOG0()
    asm("pushfd ");
    asm("cli ");
    RWSPIN_RLOCK_ENTRY_CHECK()
    asm("mov ax, 0x100 ");
    asm("lock xadd [ecx], ax "); /* ah = in.r++, al = in.w */
    asm("rwl_rlockirqs_loop: ");
    asm("cmp al, [ecx+2] "); /* compare al to out.w */
    asm("jnz short rwl_rlockirqs_loop2 ");
    RWSPIN_RLOCK_MARK_ACQ()
    asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
    asm("pop eax "); /* retrieve saved EFLAGS */
    asm("and eax, 0x200 "); /* return just interrupt mask bit */
    THISCALL_EPILOG0()

    asm("rwl_rlockirqs_loop2: ");
    X86_PAUSE
    asm("jmp short rwl_rlockirqs_loop ");
    }

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreR(TInt)
    {
    THISCALL_PROLOG1()
    RWSPIN_RUNLOCK_ENTRY_CHECK()
    asm("lock add word ptr [ecx+2], 0x100 "); /* ++out.r */
    asm("test dword ptr [esp+4], 0x200 ");
    asm("jz short rwl_runlockirqr_1 ");
    asm("sti ");
    asm("rwl_runlockirqr_1: ");
    THISCALL_EPILOG1()
    }

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreR(TInt)
    {
    /* don't mess with stacked args, yet */
    THISCALL_PROLOG0()
    asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
    asm("mov edx, eax ");
    asm("shr edx, 16 "); /* dl=out.w */
    asm("xor eax, edx "); /* al = in.w ^ out.w = 0 if no writers waiting */
    asm("and eax, 0xff ");
    asm("jne short rwl_rflashirqr_1 ");

    /* now we can remove stacked arg since we don't need it */
    THISCALL_EPILOG1()

    asm("rwl_rflashirqr_1: ");
    THISCALL_PROLOG1()
    asm("test dword ptr [esp+4], 0x200 ");
    asm("jnz short rwl_rflashirqr_2 ");
    asm("call %a0" : : "i" (&rwspin_rlock_flash_only));
    asm("jmp short rwl_rflashirqr_3 ");
    asm("rwl_rflashirqr_2: ");
    asm("call %a0" : : "i" (&rwspin_rlock_flash_irq));
    asm("rwl_rflashirqr_3: ");
    THISCALL_EPILOG1()
    }


/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveW()
    {
    THISCALL_PROLOG0()
    asm("pushfd ");
    asm("cli ");
    RWSPIN_WLOCK_ENTRY_CHECK()
    asm("mov ax, [ecx] "); /* ah = in.r, al = in.w */
    asm("rwl_wlockirqs_loop3: ");
    asm("mov edx, eax ");
    asm("inc dl "); /* dh = in.r, dl = in.w+1 */
    asm("lock cmpxchg [ecx], dx "); /* attempt to update in.w */
    asm("jne short rwl_wlockirqs_loop3 "); /* loop if failed */
    asm("rwl_wlockirqs_loop: ");
    asm("cmp ax, [ecx+2] "); /* compare ax to (out.w,out.r) */
    asm("jnz short rwl_wlockirqs_loop2 ");
    RWSPIN_WLOCK_MARK_ACQ()
    asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
    asm("pop eax "); /* retrieve saved EFLAGS */
    asm("and eax, 0x200 "); /* return just interrupt mask bit */
    THISCALL_EPILOG0()

    asm("rwl_wlockirqs_loop2: ");
    X86_PAUSE
    asm("jmp short rwl_wlockirqs_loop ");
    }

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreW(TInt)
    {
    THISCALL_PROLOG1()
    RWSPIN_WUNLOCK_ENTRY_CHECK()
    asm("mov ax, [ecx+2] "); /* ah = out.r, al = out.w */
    asm("rwl_wunlockirqr_loop: ");
    asm("mov edx, eax ");
    asm("inc dl "); /* dh = out.r, dl = out.w+1 */
    asm("lock cmpxchg [ecx+2], dx "); /* attempt to update out.w */
    asm("jne short rwl_wunlockirqr_loop "); /* loop if failed */
    asm("test dword ptr [esp+4], 0x200 ");
    asm("jz short rwl_wunlockirqr_1 ");
    asm("sti ");
    asm("rwl_wunlockirqr_1: ");
    THISCALL_EPILOG1()
    }

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreW(TInt)
    {
    /* don't mess with stacked args, yet */
    THISCALL_PROLOG0()
    asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
    asm("mov edx, eax ");
    asm("shr edx, 16 "); /* dl=out.w, dh=out.r */
    asm("inc dl "); /* dx==ax now means no-one else is waiting for lock */
    asm("xor eax, edx ");
    asm("and eax, 0xffff ");
    asm("jne short rwl_wflashirqr_1 ");

    /* now we can remove stacked arg since we don't need it */
    THISCALL_EPILOG1()

    asm("rwl_wflashirqr_1: ");
    THISCALL_PROLOG1()
    asm("test dword ptr [esp+4], 0x200 ");
    asm("jnz short rwl_wflashirqr_2 ");
    asm("call %a0" : : "i" (&rwspin_wlock_flash_only));
    asm("jmp short rwl_wflashirqr_3 ");
    asm("rwl_wflashirqr_2: ");
    asm("call %a0" : : "i" (&rwspin_wlock_flash_irq));
    asm("rwl_wflashirqr_3: ");
    THISCALL_EPILOG1()
    }


/*-----------------------------------------------------------------------------
 - Read lock flash allowing preemption
 -----------------------------------------------------------------------------*/
extern "C" TBool __fastcall rwspin_rlock_flash_preempt(TRWSpinLock* a)
    {
    a->UnlockOnlyR();
    NKern::PreemptionPoint();
    a->LockOnlyR();
    return TRUE;
    }

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptR()
    {
    THISCALL_PROLOG0()
    asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
    asm("mov edx, eax ");
    asm("shr edx, 16 "); /* dl=out.w */
    asm("xor eax, edx "); /* al = in.w ^ out.w = 0 if no writers waiting */
    asm("and eax, 0xff ");
    asm("jne %a0" : : "i" (&rwspin_rlock_flash_preempt));
    THISCALL_EPILOG0()
    }


/*-----------------------------------------------------------------------------
 - Write lock flash allowing preemption
 -----------------------------------------------------------------------------*/
extern "C" TBool __fastcall rwspin_wlock_flash_preempt(TRWSpinLock* a)
    {
    a->UnlockOnlyW();
    NKern::PreemptionPoint();
    a->LockOnlyW();
    return TRUE;
    }

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptW()
    {
    THISCALL_PROLOG0()
    asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
    asm("mov edx, eax ");
    asm("shr edx, 16 "); /* dl=out.w, dh=out.r */
    asm("inc dl "); /* dx==ax now means no-one else is waiting for lock */
    asm("xor eax, edx ");
    asm("and eax, 0xffff ");
    asm("jne %a0" : : "i" (&rwspin_wlock_flash_preempt));
    THISCALL_EPILOG0()
    }