// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\x86\ncutilf.cia
//
//

#include <x86.h>
#include <apic.h>

#if defined(__VC32__)
#define __ASM_CALL(func) _asm call func
#elif defined(__GCC32__)
#define __ASM_CALL(func) asm("call _" #func);
#else
#error Unknown x86 compiler
#endif

#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
#define SPIN_LOCK_ENTRY_CHECK()			__ASM_CALL(spin_lock_entry_check)
#define SPIN_LOCK_MARK_ACQ()			__ASM_CALL(spin_lock_mark_acq)
#define SPIN_UNLOCK_ENTRY_CHECK()		__ASM_CALL(spin_unlock_entry_check)

#define RWSPIN_RLOCK_ENTRY_CHECK()		__ASM_CALL(rwspin_rlock_entry_check)
#define RWSPIN_RLOCK_MARK_ACQ()			__ASM_CALL(rwspin_rlock_mark_acq)
#define RWSPIN_RUNLOCK_ENTRY_CHECK()	__ASM_CALL(rwspin_runlock_entry_check)

#define RWSPIN_WLOCK_ENTRY_CHECK()		__ASM_CALL(rwspin_wlock_entry_check)
#define RWSPIN_WLOCK_MARK_ACQ()			__ASM_CALL(rwspin_wlock_mark_acq)
#define RWSPIN_WUNLOCK_ENTRY_CHECK()	__ASM_CALL(rwspin_wunlock_entry_check)

#else
#define SPIN_LOCK_ENTRY_CHECK()
#define SPIN_LOCK_MARK_ACQ()
#define SPIN_UNLOCK_ENTRY_CHECK()

#define RWSPIN_RLOCK_ENTRY_CHECK()
#define RWSPIN_RLOCK_MARK_ACQ()
#define RWSPIN_RUNLOCK_ENTRY_CHECK()

#define RWSPIN_WLOCK_ENTRY_CHECK()
#define RWSPIN_WLOCK_MARK_ACQ()
#define RWSPIN_WUNLOCK_ENTRY_CHECK()

#endif


/******************************************************************************
 * Timestamp
 ******************************************************************************/

/** Returns a timestamp value which is consistent across CPUs.

*/
EXPORT_C __NAKED__ TUint64 NKern::Timestamp()
	{
	asm("pushfd ");
	asm("cli ");		// stop thread migration between reading the APIC ID and the per-CPU subscheduler
	asm("mov ecx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr ecx, 24 ");	/* ecx = local APIC ID of this CPU */
	asm("mov ecx, [ecx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp ecx, 0 ");		/* no subscheduler for this CPU yet? */
	asm("jz short use_tsc_only ");
	asm("test cl, 3 ");		/* entry not yet a valid TSubScheduler pointer? */
	asm("jnz short use_tsc_only ");
	asm("rdtsc ");
	asm("add eax, [ecx+80+%0]" : : "i" _FOFF(TSubScheduler, iExtras));	/* add per-CPU timestamp correction (iExtras[20]) */
	asm("adc edx, [ecx+84+%0]" : : "i" _FOFF(TSubScheduler, iExtras));	/* add per-CPU timestamp correction (iExtras[21]) */
	asm("popfd ");
	asm("ret ");

	asm("use_tsc_only: ");
	asm("rdtsc ");
	asm("popfd ");
	asm("ret ");
	}
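
/* Equivalent logic in C++ (illustrative sketch only - the helper and field
   names used here are assumptions, not part of this file):

	TUint64 TimestampSketch()
		{
		TInt irq = NKern::DisableAllInterrupts();	// prevent migration during the sequence
		TSubScheduler& ss = SubScheduler();			// per-CPU data found via the local APIC ID
		TUint64 t = X86::Timestamp();				// raw TSC of this CPU
		t += ss.i_TimestampOffset;					// assumed alias for the 64-bit value kept at iExtras[20..21]
		NKern::RestoreInterrupts(irq);
		return t;
		}
*/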

/** Get the current value of the CPU timestamp counter

*/
EXPORT_C __NAKED__ TUint64 X86::Timestamp()
	{
	asm("rdtsc ");
	asm("ret ");
	}


/******************************************************************************
 * Spin locks
 *
 * [this+0]		in count (byte)
 * [this+1]		out count (byte)
 * [this+6]		order (byte)
 * [this+7]		holding CPU (byte)
 ******************************************************************************/

#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
extern "C" __NAKED__ void spin_lock_entry_check()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");		/* Skip checks if subschedulers not yet initialised */
	asm("je short slec_ok ");
	asm("test edx, 3 ");	/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short slec_ok ");
	asm("movzx ecx, word ptr [ecx+6] ");	/* CL = order, CH = holding CPU */
	asm("cmp cl, 0x20 ");
	asm("jae short slec_preemption ");		/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 ");	/* Interrupts enabled? */
	asm("jz short slec_1 ");				/* No - OK */
	asm("int 0xff ");						/* Yes - die */

	asm("slec_preemption: ");
	asm("cmp cl, 0xff ");
	asm("je short slec_1 ");				/* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+52+%0], 0" : : "i" _FOFF(TSubScheduler, iExtras));
	asm("jge short slec_preemption_die ");	/* If called from ISR, die */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short slec_1 ");				/* Preemption disabled - OK */
	asm("slec_preemption_die: ");
	asm("int 0xff ");						/* Preemption enabled - die */

	asm("slec_1: ");
	asm("lea eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("cmp ch, [eax] ");
	asm("jnz short slec_2 ");				/* Not already held by this CPU - OK */
	asm("int 0xff ");						/* Already held by this CPU - die */

	asm("slec_2: ");
	asm("lea edx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("bsf eax, [edx] ");					/* find LSB of low dword */
	asm("jnz short slec_3 ");				/* skip if low dword nonzero */
	asm("bsf eax, [edx+4] ");				/* else find LSB of high dword */
	asm("lea eax, [eax+32] ");				/* add 32 to eax without changing flags */
	asm("jnz short slec_3 ");				/* skip if high dword nonzero */
	asm("mov eax, 0x7f ");					/* else set EAX = 0x7F */

	asm("slec_3: ");
	asm("cmp cl, al ");						/* check order of this lock against lowest currently held order */
	asm("jl short slec_ok ");				/* if this lock has lower order, OK - signed comparison so EOrderNone always works */
	asm("int 0xff ");						/* ordering violation - die */

	asm("slec_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}

extern "C" __NAKED__ void spin_lock_mark_acq()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");		/* Skip checks if subschedulers not yet initialised */
	asm("je short slma_ok ");
	asm("test edx, 3 ");	/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short slma_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("mov [ecx+7], al ");				/* set byte 7 to holding CPU number */
	asm("movzx ecx, byte ptr [ecx+6] ");	/* CL = order */
	asm("cmp ecx, 0x40 ");
	asm("jae short slma_ok ");				/* if EOrderNone, done */
	asm("bts [edx+%0], ecx" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));

	asm("slma_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}

extern "C" __NAKED__ void spin_unlock_entry_check()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");		/* Skip checks if subschedulers not yet initialised */
	asm("je short suec_ok ");
	asm("test edx, 3 ");	/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short suec_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));	/* eax = current CPU number */
	asm("shl eax, 8 ");						/* AL = 0, AH = current CPU number */
	asm("xor ax, [ecx+6] ");				/* AL = order, AH = holding CPU ^ current CPU number */
	asm("cmp al, 0x20 ");
	asm("jae short suec_preemption ");		/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 ");	/* Interrupts enabled? */
	asm("jz short suec_1 ");				/* No - OK */
	asm("int 0xff ");						/* Yes - die */

	asm("suec_preemption: ");
	asm("cmp al, 0xff ");
	asm("je short suec_1 ");				/* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short suec_1 ");				/* Preemption disabled - OK */
	asm("int 0xff ");						/* Preemption enabled - die */

	asm("suec_1: ");
	asm("cmp ah, 0 ");						/* Check if holding CPU ^ current CPU number == 0 */
	asm("jz short suec_2 ");				/* Already held by this CPU - OK */
	asm("int 0xff ");						/* We don't hold lock - die */

	asm("suec_2: ");
	asm("mov byte ptr [ecx+7], 0xff ");		/* reset holding CPU */
	asm("cmp eax, 0x40 ");					/* EAX = lock order */
	asm("jae short suec_ok ");				/* if EOrderNone, done */
	asm("btr [edx+%0], eax" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("jc short suec_ok ");				/* bit should have been set originally */
	asm("int 0xff ");						/* if not, die - something must have got corrupted */

	asm("suec_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}
#endif


/******************************************************************************
 * Plain old spin lock
 *
 * Fundamental algorithm:
 * lock()		{ old_in = in++; while(out!=old_in) __chill(); }
 * unlock()		{ ++out; }
 *
 * [this+0]		in count (byte)
 * [this+1]		out count (byte)
 * [this+6]		order value
 * [this+7]		holding CPU number, 0xFF if none
 *
 ******************************************************************************/
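
/* Illustrative C++ sketch of the ticket-lock algorithm implemented in
   assembler below (not part of the build; the atomic helpers named here
   are assumptions chosen for clarity, not real APIs):

	class TTicketLockSketch
		{
	public:
		void Lock()
			{
			TUint8 my = atomic_fetch_inc8(&iIn);	// my = in++  (the "lock xadd" below)
			while (atomic_load8(&iOut) != my)
				cpu_pause();						// spin until the out count reaches our ticket
			}
		void Unlock()
			{
			atomic_inc8(&iOut);						// ++out  (the "lock inc" below)
			}
	private:
		volatile TUint8 iIn;						// [this+0] next ticket to hand out
		volatile TUint8 iOut;						// [this+1] ticket currently being served
		};
*/
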
__NAKED__ EXPORT_C void TSpinLock::LockIrq()
	{
	THISCALL_PROLOG0()
	asm("cli ");
	SPIN_LOCK_ENTRY_CHECK()
	asm("mov al, 1 ");
	asm("lock xadd [ecx], al ");			/* al = in++ */
	asm("sl_lockirq_loop: ");
	asm("cmp al, [ecx+1] ");				/* compare al to out */
	asm("jnz short sl_lockirq_loop2 ");
	SPIN_LOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	THISCALL_EPILOG0()

	asm("sl_lockirq_loop2: ");
	X86_PAUSE
	asm("jmp short sl_lockirq_loop ");
	}

__NAKED__ EXPORT_C void TSpinLock::UnlockIrq()
	{
	THISCALL_PROLOG0()
	SPIN_UNLOCK_ENTRY_CHECK()
	asm("lock inc byte ptr [ecx+1] ");		/* ++out */
	asm("sti ");
	THISCALL_EPILOG0()
	}

extern "C" TBool __fastcall spin_lock_flash_irq(TSpinLock* a)
	{
	a->UnlockIrq();
	a->LockIrq();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TSpinLock::FlashIrq()
	{
	THISCALL_PROLOG0()
	asm("mov ax, [ecx] ");					/* al = in count, ah = out count */
	asm("inc ah ");
	asm("xor al, ah ");						/* al == 0 iff in == out+1, i.e. no other CPU is waiting */
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&spin_lock_flash_irq));	/* someone else waiting - release, enable IRQs, reacquire */
	THISCALL_EPILOG0()
	}

__NAKED__ EXPORT_C void TSpinLock::LockOnly()
	{
	THISCALL_PROLOG0()
	SPIN_LOCK_ENTRY_CHECK()
	asm("mov al, 1 ");
	asm("lock xadd [ecx], al ");			/* al = in++ */
	asm("sl_lockonly_loop: ");
	asm("cmp al, [ecx+1] ");				/* compare al to out */
	asm("jnz short sl_lockonly_loop2 ");
	SPIN_LOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	THISCALL_EPILOG0()

	asm("sl_lockonly_loop2: ");
	X86_PAUSE
	asm("jmp short sl_lockonly_loop ");
	}

__NAKED__ EXPORT_C void TSpinLock::UnlockOnly()
	{
	THISCALL_PROLOG0()
	SPIN_UNLOCK_ENTRY_CHECK()
	asm("lock inc byte ptr [ecx+1] ");		/* ++out */
	THISCALL_EPILOG0()
	}

extern "C" TBool __fastcall spin_lock_flash_only(TSpinLock* a)
	{
	a->UnlockOnly();
	a->LockOnly();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TSpinLock::FlashOnly()
	{
	THISCALL_PROLOG0()
	asm("mov ax, [ecx] ");					/* al = in count, ah = out count */
	asm("inc ah ");
	asm("xor al, ah ");						/* al == 0 iff no other CPU is waiting */
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&spin_lock_flash_only));	/* someone else waiting - release and reacquire */
	THISCALL_EPILOG0()
	}

__NAKED__ EXPORT_C TInt TSpinLock::LockIrqSave()
	{
	THISCALL_PROLOG0()
	asm("pushfd ");
	asm("cli ");
	SPIN_LOCK_ENTRY_CHECK()
	asm("mov al, 1 ");
	asm("lock xadd [ecx], al ");			/* al = in++ */
	asm("sl_lockirqs_loop: ");
	asm("cmp al, [ecx+1] ");				/* compare al to out */
	asm("jnz short sl_lockirqs_loop2 ");
	SPIN_LOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	asm("pop eax ");						/* retrieve saved EFLAGS */
	asm("and eax, 0x200 ");					/* return just interrupt mask bit */
	THISCALL_EPILOG0()

	asm("sl_lockirqs_loop2: ");
	X86_PAUSE
	asm("jmp short sl_lockirqs_loop ");
	}

__NAKED__ EXPORT_C void TSpinLock::UnlockIrqRestore(TInt)
	{
	THISCALL_PROLOG1()
	SPIN_UNLOCK_ENTRY_CHECK()
	asm("lock inc byte ptr [ecx+1] ");		/* ++out */
	asm("test dword ptr [esp+4], 0x200 ");	/* were interrupts enabled when the lock was taken? */
	asm("jz short sl_unlockirqr_1 ");
	asm("sti ");
	asm("sl_unlockirqr_1: ");
	THISCALL_EPILOG1()
	}

__NAKED__ EXPORT_C TBool TSpinLock::FlashIrqRestore(TInt)
	{
	/* don't mess with stacked args, yet */
	THISCALL_PROLOG0()
	asm("mov ax, [ecx] ");					/* al = in count, ah = out count */
	asm("inc ah ");
	asm("xor al, ah ");						/* al == 0 iff no other CPU is waiting */
	asm("and eax, 0xff ");
	asm("jne short sl_flashirqr_1 ");

	/* now we can remove stacked arg since we don't need it */
	THISCALL_EPILOG1()

	asm("sl_flashirqr_1: ");
	THISCALL_PROLOG1()
	asm("test dword ptr [esp+4], 0x200 ");	/* were interrupts enabled when the lock was taken? */
	asm("jnz short sl_flashirqr_2 ");
	asm("call %a0" : : "i" (&spin_lock_flash_only));
	asm("jmp short sl_flashirqr_3 ");
	asm("sl_flashirqr_2: ");
	asm("call %a0" : : "i" (&spin_lock_flash_irq));
	asm("sl_flashirqr_3: ");
	THISCALL_EPILOG1()
	}

extern "C" TBool __fastcall spin_lock_flash_preempt(TSpinLock* a)
	{
	a->UnlockOnly();
	NKern::PreemptionPoint();
	a->LockOnly();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TSpinLock::FlashPreempt()
	{
	THISCALL_PROLOG0()
	asm("mov ax, [ecx] ");					/* al = in count, ah = out count */
	asm("inc ah ");
	asm("xor al, ah ");						/* al == 0 iff no other CPU is waiting */
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&spin_lock_flash_preempt));	/* someone else waiting - release, offer preemption, reacquire */
	THISCALL_EPILOG0()
	}
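
/* Typical usage of the TSpinLock API above (illustrative only; the order
   value shown is an assumption - use the order defined for your lock):

	TSpinLock lock(TSpinLock::EOrderGenericIrqLow0);

	TInt irq = lock.LockIrqSave();		// disable interrupts and spin until acquired
	// ... critical section ...
	lock.UnlockIrqRestore(irq);			// release and restore the saved interrupt state
*/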


/******************************************************************************
 * Read/Write Spin lock
 *
 * Structure ( (in.r,in.w) , (out.r,out.w) )
 * Fundamental algorithm:
 * lockr()		{ old_in = (in.r++,in.w); while(out.w!=old_in.w) __chill(); }
 * unlockr()	{ ++out.r; }
 * lockw()		{ old_in = (in.r,in.w++); while(out!=old_in) __chill(); }
 * unlockw()	{ ++out.w; }
 *
 * [this+0]		in.w
 * [this+1]		in.r
 * [this+2]		out.w
 * [this+3]		out.r
 * [this+4]		Bit mask of CPUs which hold read locks
 * [this+6]		order value
 * [this+7]		CPU number which holds write lock, 0xFF if none
 *
 ******************************************************************************/
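
/* Illustrative C++ sketch of the read/write ticket lock implemented in
   assembler below (not part of the build; the atomic helpers named here
   are assumptions chosen for clarity, not real APIs):

	struct TRWTicketSketch
		{
		TUint8 iInW, iInR;		// [this+0], [this+1]
		TUint8 iOutW, iOutR;	// [this+2], [this+3]

		void LockR()
			{
			TUint8 w = atomic_inc_r_and_read_w(this);	// in.r++ and capture in.w in one lock xadd
			while (atomic_load8(&iOutW) != w)
				cpu_pause();							// wait until all earlier writers have unlocked
			}
		void UnlockR()	{ atomic_inc8(&iOutR); }		// ++out.r - readers never block each other on exit

		void LockW()
			{
			TUint16 t = atomic_inc_w_and_read_in(this);	// in.w++ and capture (in.w,in.r) via a cmpxchg loop
			while (atomic_load16(&iOutW) != t)
				cpu_pause();							// wait until all earlier readers and writers are done
			}
		void UnlockW()	{ atomic_inc8(&iOutW); }		// ++out.w
		};
*/
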

#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
extern "C" __NAKED__ void rwspin_rlock_entry_check()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");		/* Skip checks if subschedulers not yet initialised */
	asm("je short rwrlec_ok ");
	asm("test edx, 3 ");	/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwrlec_ok ");
	asm("movzx ecx, word ptr [ecx+6] ");	/* CL = order, CH = holding CPU for write lock */
	asm("cmp cl, 0x20 ");
	asm("jae short rwrlec_preemption ");	/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 ");	/* Interrupts enabled? */
	asm("jz short rwrlec_1 ");				/* No - OK */
	asm("int 0xff ");						/* Yes - die */

	asm("rwrlec_preemption: ");
	asm("cmp cl, 0xff ");
	asm("je short rwrlec_1 ");				/* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+52+%0], 0" : : "i" _FOFF(TSubScheduler, iExtras));
	asm("jge short rwrlec_preemption_die ");	/* If called from ISR, die */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short rwrlec_1 ");				/* Preemption disabled - OK */
	asm("rwrlec_preemption_die: ");
	asm("int 0xff ");						/* Preemption enabled - die */

	asm("rwrlec_1: ");
	asm("lea eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("cmp ch, [eax] ");
	asm("jnz short rwrlec_2 ");				/* Not already held by this CPU for write - OK */
	asm("int 0xff ");						/* Already held by this CPU for write - die */

	asm("rwrlec_2: ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("test al, [ecx+4] ");				/* Test if already held by this CPU for read */
	asm("jz short rwrlec_3 ");
	asm("int 0xff ");						/* if so, die */

	asm("rwrlec_3: ");
	asm("lea edx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("bsf eax, [edx] ");					/* find LSB of low dword */
	asm("jnz short rwrlec_4 ");				/* skip if low dword nonzero */
	asm("bsf eax, [edx+4] ");				/* else find LSB of high dword */
	asm("lea eax, [eax+32] ");				/* add 32 to eax without changing flags */
	asm("jnz short rwrlec_4 ");				/* skip if high dword nonzero */
	asm("mov eax, 0x7f ");					/* else set EAX = 0x7F */

	asm("rwrlec_4: ");
	asm("cmp cl, al ");						/* check order of this lock against lowest currently held order */
	asm("jl short rwrlec_ok ");				/* if this lock has lower order, OK - signed comparison so EOrderNone always works */
	asm("int 0xff ");						/* ordering violation - die */

	asm("rwrlec_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}

extern "C" __NAKED__ void rwspin_rlock_mark_acq()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");		/* Skip checks if subschedulers not yet initialised */
	asm("je short rwrlma_ok ");
	asm("test edx, 3 ");	/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwrlma_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("lock or [ecx+4], al ");			/* set bit in byte 4 corresponding to this CPU */
	asm("movzx ecx, byte ptr [ecx+6] ");	/* CL = order */
	asm("cmp ecx, 0x40 ");
	asm("jae short rwrlma_ok ");			/* if EOrderNone, done */
	asm("bts [edx+%0], ecx" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));

	asm("rwrlma_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}

extern "C" __NAKED__ void rwspin_runlock_entry_check()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ebx ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");		/* Skip checks if subschedulers not yet initialised */
	asm("je short rwruec_ok ");
	asm("test edx, 3 ");	/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwruec_ok ");
	asm("mov eax, [ecx+4] ");				/* AL = R-mask, EAX byte 2 = order */
	asm("and eax, 0x00ffffff ");			/* mask out W CPU */
	asm("cmp eax, 0x00200000 ");
	asm("jae short rwruec_preemption ");	/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 ");	/* Interrupts enabled? */
	asm("jz short rwruec_1 ");				/* No - OK */
	asm("int 0xff ");						/* Yes - die */

	asm("rwruec_preemption: ");
	asm("cmp eax, 0x00ff0000 ");
	asm("jae short rwruec_1 ");				/* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short rwruec_1 ");				/* Preemption disabled - OK */
	asm("int 0xff ");						/* Preemption enabled - die */

	asm("rwruec_1: ");
	asm("mov ebx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("test al, bl ");					/* Check if current CPU holds read lock */
	asm("jnz short rwruec_2 ");				/* Already held by this CPU - OK */
	asm("int 0xff ");						/* We don't hold lock - die */

	asm("rwruec_2: ");
	asm("not bl ");
	asm("lock and [ecx+4], bl ");			/* clear bit in R-holding CPU mask */
	asm("shr eax, 16 ");					/* EAX = lock order */
	asm("cmp eax, 0x40 ");
	asm("jae short rwruec_ok ");			/* if EOrderNone, done */
	asm("btr [edx+%0], eax" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("jc short rwruec_ok ");				/* bit should have been set originally */
	asm("int 0xff ");						/* if not, die - something must have got corrupted */

	asm("rwruec_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop ebx ");
	asm("pop eax ");
	asm("ret ");
	}


extern "C" __NAKED__ void rwspin_wlock_entry_check()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");		/* Skip checks if subschedulers not yet initialised */
	asm("je short rwwlec_ok ");
	asm("test edx, 3 ");	/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwwlec_ok ");
	asm("movzx ecx, word ptr [ecx+6] ");	/* CL = order, CH = write lock holding CPU */
	asm("cmp cl, 0x20 ");
	asm("jae short rwwlec_preemption ");	/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 ");	/* Interrupts enabled? */
	asm("jz short rwwlec_1 ");				/* No - OK */
	asm("int 0xff ");						/* Yes - die */

	asm("rwwlec_preemption: ");
	asm("cmp cl, 0xff ");
	asm("je short rwwlec_1 ");				/* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+52+%0], 0" : : "i" _FOFF(TSubScheduler, iExtras));
	asm("jge short rwwlec_preemption_die ");	/* If called from ISR, die */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short rwwlec_1 ");				/* Preemption disabled - OK */
	asm("rwwlec_preemption_die: ");
	asm("int 0xff ");						/* Preemption enabled - die */

	asm("rwwlec_1: ");
	asm("lea eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("cmp ch, [eax] ");
	asm("jnz short rwwlec_2 ");				/* Not already held by this CPU for write - OK */
	asm("int 0xff ");						/* Already held by this CPU for write - die */

	asm("rwwlec_2: ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("test al, [ecx+4] ");				/* Test if already held by this CPU for read */
	asm("jz short rwwlec_3 ");
	asm("int 0xff ");						/* if so, die */

	asm("rwwlec_3: ");
	asm("lea edx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("bsf eax, [edx] ");					/* find LSB of low dword */
	asm("jnz short rwwlec_4 ");				/* skip if low dword nonzero */
	asm("bsf eax, [edx+4] ");				/* else find LSB of high dword */
	asm("lea eax, [eax+32] ");				/* add 32 to eax without changing flags */
	asm("jnz short rwwlec_4 ");				/* skip if high dword nonzero */
	asm("mov eax, 0x7f ");					/* else set EAX = 0x7F */

	asm("rwwlec_4: ");
	asm("cmp cl, al ");						/* check order of this lock against lowest currently held order */
	asm("jl short rwwlec_ok ");				/* if this lock has lower order, OK - signed comparison so EOrderNone always works */
	asm("int 0xff ");						/* ordering violation - die */

	asm("rwwlec_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}

extern "C" __NAKED__ void rwspin_wlock_mark_acq()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");		/* Skip checks if subschedulers not yet initialised */
	asm("je short rwwlma_ok ");
	asm("test edx, 3 ");	/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwwlma_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("mov [ecx+7], al ");				/* set byte 7 to holding CPU number */
	asm("movzx ecx, byte ptr [ecx+6] ");	/* CL = order */
	asm("cmp ecx, 0x40 ");
	asm("jae short rwwlma_ok ");			/* if EOrderNone, done */
	asm("bts [edx+%0], ecx" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));

	asm("rwwlma_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}

extern "C" __NAKED__ void rwspin_wunlock_entry_check()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");		/* Skip checks if subschedulers not yet initialised */
	asm("je short rwwuec_ok ");
	asm("test edx, 3 ");	/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwwuec_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));	/* eax = current CPU number */
	asm("shl eax, 8 ");						/* AL = 0, AH = current CPU number */
	asm("xor ax, [ecx+6] ");				/* AL = order, AH = holding CPU ^ current CPU number */
	asm("cmp al, 0x20 ");
	asm("jae short rwwuec_preemption ");	/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 ");	/* Interrupts enabled? */
	asm("jz short rwwuec_1 ");				/* No - OK */
	asm("int 0xff ");						/* Yes - die */

	asm("rwwuec_preemption: ");
	asm("cmp al, 0xff ");
	asm("je short rwwuec_1 ");				/* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short rwwuec_1 ");				/* Preemption disabled - OK */
	asm("int 0xff ");						/* Preemption enabled - die */

	asm("rwwuec_1: ");
	asm("cmp ah, 0 ");						/* Check if holding CPU ^ current CPU number == 0 */
	asm("jz short rwwuec_2 ");				/* Already held by this CPU - OK */
	asm("int 0xff ");						/* We don't hold lock - die */

	asm("rwwuec_2: ");
	asm("mov byte ptr [ecx+7], 0xff ");		/* reset holding CPU */
	asm("cmp eax, 0x40 ");					/* EAX = lock order */
	asm("jae short rwwuec_ok ");			/* if EOrderNone, done */
	asm("btr [edx+%0], eax" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("jc short rwwuec_ok ");				/* bit should have been set originally */
	asm("int 0xff ");						/* if not, die - something must have got corrupted */

	asm("rwwuec_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}
#endif


/*-----------------------------------------------------------------------------
 - Read locks disabling IRQ
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockIrqR()
	{
	THISCALL_PROLOG0()
	asm("cli ");
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("mov ax, 0x100 ");
	asm("lock xadd [ecx], ax ");			/* ah = in.r++, al = in.w */
	asm("rwl_rlockirq_loop: ");
	asm("cmp al, [ecx+2] ");				/* compare al to out.w */
	asm("jnz short rwl_rlockirq_loop2 ");
	RWSPIN_RLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	THISCALL_EPILOG0()

	asm("rwl_rlockirq_loop2: ");
	X86_PAUSE
	asm("jmp short rwl_rlockirq_loop ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqR()
	{
	THISCALL_PROLOG0()
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	asm("lock add word ptr [ecx+2], 0x100 ");	/* ++out.r */
	asm("sti ");
	THISCALL_EPILOG0()
	}

extern "C" TBool __fastcall rwspin_rlock_flash_irq(TRWSpinLock* a)
	{
	a->UnlockIrqR();
	a->LockIrqR();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqR()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");					/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");					/* dl=out.w */
	asm("xor eax, edx ");					/* al = in.w ^ out.w = 0 if no writers waiting */
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&rwspin_rlock_flash_irq));
	THISCALL_EPILOG0()
	}


/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockIrqW()
	{
	THISCALL_PROLOG0()
	asm("cli ");
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx] ");					/* ah = in.r, al = in.w */
	asm("rwl_wlockirq_loop3: ");
	asm("mov edx, eax ");
	asm("inc dl ");							/* dh = in.r, dl = in.w+1 */
	asm("lock cmpxchg [ecx], dx ");			/* attempt to update in.w */
	asm("jne short rwl_wlockirq_loop3 ");	/* loop if failed */
	asm("rwl_wlockirq_loop: ");
	asm("cmp ax, [ecx+2] ");				/* compare ax to (out.w,out.r) */
	asm("jnz short rwl_wlockirq_loop2 ");
	RWSPIN_WLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	THISCALL_EPILOG0()

	asm("rwl_wlockirq_loop2: ");
	X86_PAUSE
	asm("jmp short rwl_wlockirq_loop ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqW()
	{
	THISCALL_PROLOG0()
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx+2] ");				/* ah = out.r, al = out.w */
	asm("rwl_wunlockirq_loop: ");
	asm("mov edx, eax ");
	asm("inc dl ");							/* dh = out.r, dl = out.w+1 */
	asm("lock cmpxchg [ecx+2], dx ");		/* attempt to update out.w */
	asm("jne short rwl_wunlockirq_loop ");	/* loop if failed */
	asm("sti ");
	THISCALL_EPILOG0()
	}

extern "C" TBool __fastcall rwspin_wlock_flash_irq(TRWSpinLock* a)
	{
	a->UnlockIrqW();
	a->LockIrqW();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqW()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");					/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");					/* dl=out.w, dh=out.r */
	asm("inc dl ");							/* dx==ax now means no-one else is waiting for lock */
	asm("xor eax, edx ");
	asm("and eax, 0xffff ");
	asm("jne %a0" : : "i" (&rwspin_wlock_flash_irq));
	THISCALL_EPILOG0()
	}



/*-----------------------------------------------------------------------------
 - Read locks leaving IRQ alone
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockOnlyR()
	{
	THISCALL_PROLOG0()
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("mov ax, 0x100 ");
	asm("lock xadd [ecx], ax ");			/* ah = in.r++, al = in.w */
	asm("rwl_rlockonly_loop: ");
	asm("cmp al, [ecx+2] ");				/* compare al to out.w */
	asm("jnz short rwl_rlockonly_loop2 ");
	RWSPIN_RLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	THISCALL_EPILOG0()

	asm("rwl_rlockonly_loop2: ");
	X86_PAUSE
	asm("jmp short rwl_rlockonly_loop ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyR()
	{
	THISCALL_PROLOG0()
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	asm("lock add word ptr [ecx+2], 0x100 ");	/* ++out.r */
	THISCALL_EPILOG0()
	}

extern "C" TBool __fastcall rwspin_rlock_flash_only(TRWSpinLock* a)
	{
	a->UnlockOnlyR();
	a->LockOnlyR();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyR()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");					/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");					/* dl=out.w */
	asm("xor eax, edx ");					/* al = in.w ^ out.w = 0 if no writers waiting */
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&rwspin_rlock_flash_only));
	THISCALL_EPILOG0()
	}


/*-----------------------------------------------------------------------------
 - Write locks leaving IRQ alone
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockOnlyW()
	{
	THISCALL_PROLOG0()
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx] ");					/* ah = in.r, al = in.w */
	asm("rwl_wlockonly_loop3: ");
	asm("mov edx, eax ");
	asm("inc dl ");							/* dh = in.r, dl = in.w+1 */
	asm("lock cmpxchg [ecx], dx ");			/* attempt to update in.w */
	asm("jne short rwl_wlockonly_loop3 ");	/* loop if failed */
	asm("rwl_wlockonly_loop: ");
	asm("cmp ax, [ecx+2] ");				/* compare ax to (out.w,out.r) */
	asm("jnz short rwl_wlockonly_loop2 ");
	RWSPIN_WLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	THISCALL_EPILOG0()

	asm("rwl_wlockonly_loop2: ");
	X86_PAUSE
	asm("jmp short rwl_wlockonly_loop ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyW()
	{
	THISCALL_PROLOG0()
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx+2] ");				/* ah = out.r, al = out.w */
	asm("rwl_wunlockonly_loop: ");
	asm("mov edx, eax ");
	asm("inc dl ");							/* dh = out.r, dl = out.w+1 */
	asm("lock cmpxchg [ecx+2], dx ");		/* attempt to update out.w */
	asm("jne short rwl_wunlockonly_loop ");	/* loop if failed */
	THISCALL_EPILOG0()
	}

extern "C" TBool __fastcall rwspin_wlock_flash_only(TRWSpinLock* a)
	{
	a->UnlockOnlyW();
	a->LockOnlyW();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyW()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");					/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");					/* dl=out.w, dh=out.r */
	asm("inc dl ");							/* dx==ax now means no-one else is waiting for lock */
	asm("xor eax, edx ");
	asm("and eax, 0xffff ");
	asm("jne %a0" : : "i" (&rwspin_wlock_flash_only));
	THISCALL_EPILOG0()
	}



/*-----------------------------------------------------------------------------
 - Read locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveR()
	{
	THISCALL_PROLOG0()
	asm("pushfd ");
	asm("cli ");
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("mov ax, 0x100 ");
	asm("lock xadd [ecx], ax ");			/* ah = in.r++, al = in.w */
	asm("rwl_rlockirqs_loop: ");
	asm("cmp al, [ecx+2] ");				/* compare al to out.w */
	asm("jnz short rwl_rlockirqs_loop2 ");
	RWSPIN_RLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	asm("pop eax ");						/* retrieve saved EFLAGS */
	asm("and eax, 0x200 ");					/* return just interrupt mask bit */
	THISCALL_EPILOG0()

	asm("rwl_rlockirqs_loop2: ");
	X86_PAUSE
	asm("jmp short rwl_rlockirqs_loop ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreR(TInt)
	{
	THISCALL_PROLOG1()
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	asm("lock add word ptr [ecx+2], 0x100 ");	/* ++out.r */
	asm("test dword ptr [esp+4], 0x200 ");		/* were interrupts enabled when the lock was taken? */
	asm("jz short rwl_runlockirqr_1 ");
	asm("sti ");
	asm("rwl_runlockirqr_1: ");
	THISCALL_EPILOG1()
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreR(TInt)
	{
	/* don't mess with stacked args, yet */
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");					/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");					/* dl=out.w */
	asm("xor eax, edx ");					/* al = in.w ^ out.w = 0 if no writers waiting */
	asm("and eax, 0xff ");
	asm("jne short rwl_rflashirqr_1 ");

	/* now we can remove stacked arg since we don't need it */
	THISCALL_EPILOG1()

	asm("rwl_rflashirqr_1: ");
	THISCALL_PROLOG1()
	asm("test dword ptr [esp+4], 0x200 ");	/* were interrupts enabled when the lock was taken? */
	asm("jnz short rwl_rflashirqr_2 ");
	asm("call %a0" : : "i" (&rwspin_rlock_flash_only));
	asm("jmp short rwl_rflashirqr_3 ");
	asm("rwl_rflashirqr_2: ");
	asm("call %a0" : : "i" (&rwspin_rlock_flash_irq));
	asm("rwl_rflashirqr_3: ");
	THISCALL_EPILOG1()
	}


/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveW()
	{
	THISCALL_PROLOG0()
	asm("pushfd ");
	asm("cli ");
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx] ");					/* ah = in.r, al = in.w */
	asm("rwl_wlockirqs_loop3: ");
	asm("mov edx, eax ");
	asm("inc dl ");							/* dh = in.r, dl = in.w+1 */
	asm("lock cmpxchg [ecx], dx ");			/* attempt to update in.w */
	asm("jne short rwl_wlockirqs_loop3 ");	/* loop if failed */
	asm("rwl_wlockirqs_loop: ");
	asm("cmp ax, [ecx+2] ");				/* compare ax to (out.w,out.r) */
	asm("jnz short rwl_wlockirqs_loop2 ");
	RWSPIN_WLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	asm("pop eax ");						/* retrieve saved EFLAGS */
	asm("and eax, 0x200 ");					/* return just interrupt mask bit */
	THISCALL_EPILOG0()

	asm("rwl_wlockirqs_loop2: ");
	X86_PAUSE
	asm("jmp short rwl_wlockirqs_loop ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreW(TInt)
	{
	THISCALL_PROLOG1()
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx+2] ");				/* ah = out.r, al = out.w */
	asm("rwl_wunlockirqr_loop: ");
	asm("mov edx, eax ");
	asm("inc dl ");							/* dh = out.r, dl = out.w+1 */
	asm("lock cmpxchg [ecx+2], dx ");		/* attempt to update out.w */
	asm("jne short rwl_wunlockirqr_loop ");	/* loop if failed */
	asm("test dword ptr [esp+4], 0x200 ");	/* were interrupts enabled when the lock was taken? */
	asm("jz short rwl_wunlockirqr_1 ");
	asm("sti ");
	asm("rwl_wunlockirqr_1: ");
	THISCALL_EPILOG1()
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreW(TInt)
	{
	/* don't mess with stacked args, yet */
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");					/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");					/* dl=out.w, dh=out.r */
	asm("inc dl ");							/* dx==ax now means no-one else is waiting for lock */
	asm("xor eax, edx ");
	asm("and eax, 0xffff ");
	asm("jne short rwl_wflashirqr_1 ");

	/* now we can remove stacked arg since we don't need it */
	THISCALL_EPILOG1()

	asm("rwl_wflashirqr_1: ");
	THISCALL_PROLOG1()
	asm("test dword ptr [esp+4], 0x200 ");	/* were interrupts enabled when the lock was taken? */
	asm("jnz short rwl_wflashirqr_2 ");
	asm("call %a0" : : "i" (&rwspin_wlock_flash_only));
	asm("jmp short rwl_wflashirqr_3 ");
	asm("rwl_wflashirqr_2: ");
	asm("call %a0" : : "i" (&rwspin_wlock_flash_irq));
	asm("rwl_wflashirqr_3: ");
	THISCALL_EPILOG1()
	}


/*-----------------------------------------------------------------------------
 - Read lock flash allowing preemption
 -----------------------------------------------------------------------------*/
extern "C" TBool __fastcall rwspin_rlock_flash_preempt(TRWSpinLock* a)
	{
	a->UnlockOnlyR();
	NKern::PreemptionPoint();
	a->LockOnlyR();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptR()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");					/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");					/* dl=out.w */
	asm("xor eax, edx ");					/* al = in.w ^ out.w = 0 if no writers waiting */
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&rwspin_rlock_flash_preempt));
	THISCALL_EPILOG0()
	}


/*-----------------------------------------------------------------------------
 - Write lock flash allowing preemption
 -----------------------------------------------------------------------------*/
extern "C" TBool __fastcall rwspin_wlock_flash_preempt(TRWSpinLock* a)
	{
	a->UnlockOnlyW();
	NKern::PreemptionPoint();
	a->LockOnlyW();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptW()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");					/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");					/* dl=out.w, dh=out.r */
	asm("inc dl ");							/* dx==ax now means no-one else is waiting for lock */
	asm("xor eax, edx ");
	asm("and eax, 0xffff ");
	asm("jne %a0" : : "i" (&rwspin_wlock_flash_preempt));
	THISCALL_EPILOG0()
	}
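
/* Typical usage of the TRWSpinLock API above (illustrative only; the order
   value shown is an assumption - use the order defined for your lock):

	TRWSpinLock lock(TSpinLock::EOrderGenericIrqLow0);

	TInt irq = lock.LockIrqSaveR();		// shared (read) access
	// ... read shared state ...
	lock.UnlockIrqRestoreR(irq);

	irq = lock.LockIrqSaveW();			// exclusive (write) access
	// ... modify shared state ...
	lock.UnlockIrqRestoreW(irq);
*/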