// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\x86\ncthrd.cia
//
//

#include <x86.h>
#include <apic.h>

const TLinAddr NKern_Exit = (TLinAddr)NKern::Exit;
//const TLinAddr NKern_Lock = (TLinAddr)NKern::Lock;

extern "C" void send_resched_ipis(TUint32 aMask);
extern "C" void __fastcall add_dfc(TDfc* aDfc);


__NAKED__ void __StartThread()
	{
	// On entry interrupts disabled, SThreadExcStack on stack
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));	// read local APIC ID register
	asm("add esp, 4 ");		// get rid of iReason
	asm("shr eax, 24 ");		// eax = this CPU's APIC ID
	asm("mov esi, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));	// esi = this CPU's TSubScheduler
	asm("xor eax, eax ");
	asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
	asm("test eax, eax ");
	asm("jz short no_resched_ipis ");
	asm("push eax ");
	asm("call %a0" : : "i" (&send_resched_ipis));
	asm("add esp, 4 ");
	asm("no_resched_ipis: ");
	// restore the initial thread context from the SThreadExcStack frame
	asm("pop ecx ");
	asm("pop edx ");
	asm("pop ebx ");
	asm("pop esi ");
	asm("pop edi ");
	asm("pop ebp ");
	asm("pop eax ");
	asm("pop ds ");
	asm("pop es ");
	asm("pop fs ");
	asm("pop gs ");
	asm("sti ");			// enable interrupts
	asm("push ebx ");		// ebx = argument for the entry point
	asm("call eax ");		// eax = thread entry point
	asm("add esp, 4 ");
	asm("call %a0" : : "i" (NKern_Exit));	// entry point returned - exit this thread
	}
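
/* Editor's outline of the sequence above (illustrative pseudo-C derived from
   the assembly, not original commentary):

	// entered with interrupts disabled and an SThreadExcStack frame on the stack
	TUint32 mask = atomic_swap(&SubScheduler().iReschedIPIs, 0);
	if (mask)
		send_resched_ipis(mask);	// kick CPUs whose reschedule was deferred
	// pop the SThreadExcStack frame (GPRs, then ds/es/fs/gs), then...
	sti();					// ...enable interrupts
	entry_point(argument);			// eax = entry point, ebx = its argument
	NKern::Exit();				// entry point returned - terminate the thread
*/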
|
extern "C" __NAKED__ TUint __tr()
	{
	asm("xor eax, eax");
	asm("str ax");
	asm("ret");
	}
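
// Editor's note (inferred from this file, not original commentary): each CPU's
// TSS selector is 0x28 + 8*n for CPU n, so the task register read by "str"
// encodes the current CPU number. Hence the recurring idiom
// "str bx; sub bl, 0x28; shr bl, 3" yields n, and "str dx; shr dl, 3" yields
// n + 5 (which the DFC state functions below adjust with constants such as
// 0x7b and 0xbb).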
|
__NAKED__ TUint32 X86::GetCR0()
	{
	asm("mov eax, cr0");
	asm("ret");
	}

__NAKED__ void X86::SetCR0(TUint32)
	{
	asm("mov eax, [esp+4]");
	asm("mov cr0, eax");
	asm("ret");
	}

__NAKED__ TUint32 X86::ModifyCR0(TUint32 /*clear*/, TUint32 /*set*/)
	{
	asm("mov ecx, [esp+4]");	// ecx = bits to clear
	asm("mov edx, [esp+8]");	// edx = bits to set
	asm("mov eax, cr0");		// eax = original CR0 (return value)
	asm("not ecx");
	asm("and ecx, eax");		// ecx = CR0 & ~clear
	asm("or ecx, edx");		// ecx = (CR0 & ~clear) | set
	asm("mov cr0, ecx");
	asm("ret");
	}
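
// Usage note (editor's illustration, not original commentary): ModifyCR0(c,s)
// performs CR0 = (CR0 & ~c) | s and returns the previous CR0. For example,
// X86::ModifyCR0(0, KBit3) would set CR0.TS (task-switched, bit 3) and
// X86::ModifyCR0(KBit3, 0) would clear it again.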
|
/** Mark the beginning of an event handler tied to a thread or thread group

Return the number of the CPU on which the event handler should run
*/
__NAKED__ TInt NSchedulable::BeginTiedEvent()
	{
	THISCALL_PROLOG0()
	asm("mov eax, 0x10000 ");	// EEventCountInc
	asm("lock xadd [ecx+%0], eax" : : "i" _FOFF(NSchedulable,iEventState));
	asm("test eax, 0x8000 ");	// EEventParent
	asm("jz short bte0 ");		// not set so don't look at group
	asm("mov edx, [ecx+%0]" : : "i" _FOFF(NSchedulable,iParent));
	asm("cmp edx, 0 ");
	asm("jz short bte_bad ");	// no parent - shouldn't happen
	asm("cmp edx, ecx ");
	asm("jz short bte2 ");		// parent not yet updated, use iNewParent
	asm("bte1: ");
	asm("mov eax, 0x10000 ");	// EEventCountInc
	asm("lock xadd [edx+%0], eax" : : "i" _FOFF(NSchedulable,iEventState));
	asm("bte0: ");
	asm("and eax, 0x1f ");		// EEventCpuMask
	THISCALL_EPILOG0()

	asm("bte2: ");
	asm("lock add dword ptr [esp], 0 ");	// make sure iNewParent is read after iParent
	asm("mov edx, [ecx+%0]" : : "i" _FOFF(NThreadBase,iNewParent));
	asm("cmp edx, 0 ");
	asm("jnz short bte1 ");
	asm("lock add dword ptr [esp], 0 ");	// make sure iParent is read after iNewParent
	asm("mov edx, [ecx+%0]" : : "i" _FOFF(NSchedulable,iParent));	// iNewParent has been cleared, so iParent must now have been set
	asm("cmp edx, ecx ");
	asm("jnz short bte1 ");		// if iParent still not set, something is wrong

	asm("bte_bad: ");
	asm("int 0xff ");
	}
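
/* Editor's sketch of the logic above in portable C++ (illustrative only, not
   part of the original source; assumes __e32_atomic_add_ord32 from
   e32atomics.h and glosses over the iParent/iNewParent race that the barriers
   above handle). iEventState layout, from the constants used in this file:
   bits 0-4 event CPU, bits 8-12 thread CPU, bit 14 EDeferredReady,
   bit 15 EEventParent, bits 16-31 event count.

	TInt NSchedulable::BeginTiedEvent()
		{
		TUint32 s = __e32_atomic_add_ord32(&iEventState, 0x10000);	// count++
		if (s & 0x8000)						// tied via a parent group?
			{
			NSchedulable* p = iParent;
			if (p == this)
				p = ((NThreadBase*)this)->iNewParent;	// parent pointer mid-update
			s = __e32_atomic_add_ord32(&p->iEventState, 0x10000);	// count++ on group
			}
		return s & 0x1f;					// CPU to run the handler on
		}
*/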
|

/** Mark the end of an event handler tied to a thread or thread group

*/
__NAKED__ void NSchedulable::EndTiedEvent()
	{
	THISCALL_PROLOG0()
137 asm("test dword ptr [ecx+%0], 0x800" : : "i" _FOFF(NSchedulable,iEventState)); // EEventParent |
|
138 asm("jnz short etep0 "); |
|
139 asm("ete1: "); |
|
140 asm("mov eax, [ecx+%0]" : : "i" _FOFF(NSchedulable,iEventState)); |
|
141 asm("ete2: "); |
|
142 asm("mov edx, eax "); |
|
143 asm("sub edx, 0x10000 "); // EEventCountInc |
|
144 asm("cmp edx, 0x10000 "); // EEventCountInc |
|
145 asm("jae short ete3 "); |
|
146 asm("mov dl, dh "); |
|
147 asm("and dl, 0x1f "); // event cpu = thread cpu |
|
148 asm("ete3: "); |
|
149 asm("lock cmpxchg [ecx+%0], edx" : : "i" _FOFF(NSchedulable,iEventState)); |
|
150 asm("jne short ete2 "); |
|
151 asm("cmp edx, 0x10000 "); // EEventCountInc |
|
152 asm("jae short ete4 "); // If this wasn't last tied event, finish |
|
153 asm("test edx, 0x4000 "); // test deferred ready flag |
|
154 asm("jz short ete4 "); |
|
155 asm("push ecx "); |
|
156 asm("lea ecx, [ecx+%0]" : : "i" _FOFF(NSchedulable,i_IDfcMem)); |
|
157 asm("call %a0" : : "i" (add_dfc)); |
|
158 asm("pop ecx "); |
|
159 asm("ete4: "); |
|
160 THISCALL_EPILOG0() |
|
161 |
|
162 asm("etep0: "); |
|
163 asm("lock add dword ptr [esp], 0 "); // make sure iParent is read after seeing parent flag set |
|
164 asm("mov edx, [ecx+%0]" : : "i" _FOFF(NSchedulable,iParent)); |
|
165 asm("cmp edx, 0 "); |
|
166 asm("jz short ete_bad "); // no parent - shouldn't happen |
|
167 asm("cmp edx, ecx "); |
|
168 asm("jz short etep1 "); // parent not yet updated, use iNewParent |
|
169 asm("etep2: "); |
|
170 asm("push ecx "); |
|
171 asm("mov ecx, edx "); |
|
172 asm("call ete1 "); // operate on parent state |
|
173 asm("pop ecx "); // restore this |
|
174 // mb(); |
|
175 asm("mov eax, 0xffff0000 "); // -EEventCountInc |
|
176 asm("lock xadd [ecx+%0], eax" : : "i" _FOFF(NSchedulable,iEventState)); // decrement thread's event count |
|
177 THISCALL_EPILOG0() |
|
178 |
|
179 asm("etep1: "); |
|
180 asm("lock add dword ptr [esp], 0 "); // make sure iNewParent is read after iParent |
|
181 asm("mov edx, [ecx+%0]" : : "i" _FOFF(NThreadBase,iNewParent)); |
|
182 asm("cmp edx, 0 "); |
|
183 asm("jnz short etep2 "); |
|
184 asm("lock add dword ptr [esp], 0 "); // make sure iParent is read after iNewParent |
|
185 asm("mov edx, [ecx+%0]" : : "i" _FOFF(NSchedulable,iParent)); // iNewParent has been cleared, so iParent must now have been set |
|
186 asm("cmp edx, ecx "); |
|
187 asm("jnz short etep2 "); // if iParent still not set, something is wrong |
|
188 |
|
189 asm("ete_bad: "); |
|
190 asm("int 0xff "); |
|
191 } |
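
/* Editor's sketch of the decrement loop above (illustrative; parent handling
   omitted - the assembly recurses into the parent's state via ete1 first).
   Assumes __e32_atomic_cas_ord32(volatile TAny*, TUint32*, TUint32) from
   e32atomics.h, which updates the expected value on failure:

	void NSchedulable::EndTiedEvent()
		{
		TUint32 s = iEventState, ns;
		do	{
			ns = s - 0x10000;			// count--
			if (ns < 0x10000)			// last tied event?
				ns = (ns & ~0x1fu) | ((ns >> 8) & 0x1f);	// event CPU = thread CPU
			} while (!__e32_atomic_cas_ord32(&iEventState, &s, ns));
		if (ns < 0x10000 && (ns & 0x4000))		// count now 0 and ready deferred?
			add_dfc((TDfc*)&i_IDfcMem);		// run the deferred ready via an IDFC
		}
*/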
|

/** Check for concurrent tied events when a thread/group becomes ready

This is only ever called on a lone thread or a group, not on a thread
which is part of a group.

Update the thread CPU field in iEventState
If thread CPU != event CPU and event count nonzero, atomically
set the ready deferred flag and return TRUE, else return FALSE.
If event count zero, set event CPU = thread CPU atomically.

@param aCpu the CPU on which the thread/group is to become ready
@return TRUE if the ready must be deferred.
*/
__NAKED__ TBool NSchedulable::TiedEventReadyInterlock(TInt aCpu)
	{
	THISCALL_PROLOG1()
	asm("push ebx ");
	asm("mov ebx, [esp+8] ");	// ebx = aCpu
	asm("and ebx, 0x1f ");
	asm("mov eax, [ecx+%0]" : : "i" _FOFF(NSchedulable,iEventState));
	asm("teri1: ");
	asm("mov edx, eax ");
	asm("and dh, 0xe0 ");
	asm("or dh, bl ");		// set thread CPU field
	asm("cmp edx, 0x10000 ");	// EEventCountInc
	asm("jb short teri2 ");		// skip if event count zero
	asm("cmp dl, bl ");		// thread CPU = event CPU?
	asm("je short teri3 ");		// skip if same
	asm("or edx, 0x4000 ");		// EDeferredReady
	asm("jmp short teri3 ");
	asm("teri2: ");
	asm("mov dl, dh ");
	asm("and dl, 0x1f ");		// event CPU = thread CPU
	asm("teri3: ");
	asm("lock cmpxchg [ecx+%0], edx" : : "i" _FOFF(NSchedulable,iEventState));
	asm("jne short teri1 ");
	asm("xor eax, edx ");		// old iEventState ^ new iEventState
	asm("pop ebx ");
	asm("and eax, 0x4000 ");	// return TRUE if EDeferredReady was set
	THISCALL_EPILOG1()
	}
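
/* Editor's sketch of the interlock above (illustrative, assumes
   __e32_atomic_cas_ord32 from e32atomics.h):

	TBool NSchedulable::TiedEventReadyInterlock(TInt aCpu)
		{
		TUint32 cpu = TUint32(aCpu) & 0x1f;
		TUint32 s = iEventState, ns;
		do	{
			ns = (s & ~0x1f00u) | (cpu << 8);	// record thread CPU
			if (ns >= 0x10000)			// tied events outstanding?
				{
				if ((ns & 0x1f) != cpu)		// running on another CPU?
					ns |= 0x4000;		// defer the ready
				}
			else
				ns = (ns & ~0x1fu) | cpu;	// idle: event CPU = thread CPU
			} while (!__e32_atomic_cas_ord32(&iEventState, &s, ns));
		return (s ^ ns) & 0x4000;	// TRUE iff EDeferredReady newly set
		}
*/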
|

/** Check for concurrent tied events when a thread leaves a group

If event count zero, atomically set the event and thread CPUs to the
current CPU, clear the parent flag and return TRUE, else return FALSE.

@return TRUE if the parent flag has been cleared
*/
__NAKED__ TBool NThreadBase::TiedEventLeaveInterlock()
	{
	THISCALL_PROLOG0()
	asm("push ebx ");
	asm("xor ebx, ebx ");
	asm("str bx ");			// bx = TR = 0x28 + 8*cpu
	asm("sub bl, 0x28 ");
	asm("shr bl, 3 ");		// bl = current CPU number
	asm("mov bh, bl ");		// ebx = thread CPU | event CPU fields
	asm("mov eax, [ecx+%0]" : : "i" _FOFF(NSchedulable,iEventState));
	asm("teli1: ");
	asm("cmp eax, 0x10000 ");	// EEventCountInc
	asm("jae short teli0 ");	// if count >=1, finish and return FALSE
	asm("mov edx, ebx ");		// update CPUs, clear parent flag
	// NOTE: Deferred ready flag must have been clear since thread is running
	asm("lock cmpxchg [ecx+%0], edx" : : "i" _FOFF(NSchedulable,iEventState));
	asm("jne short teli1 ");
	asm("pop ebx ");
	asm("mov eax, 1 ");		// return TRUE
	THISCALL_EPILOG0()
	asm("teli0: ");
	asm("pop ebx ");
	asm("xor eax, eax ");		// return FALSE
	THISCALL_EPILOG0()
	}
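
/* Editor's sketch (illustrative; NKern::CurrentCpu() stands in for the
   TR-selector arithmetic the assembly uses):

	TBool NThreadBase::TiedEventLeaveInterlock()
		{
		TUint32 cpu = TUint32(NKern::CurrentCpu());
		TUint32 ns = cpu | (cpu << 8);		// both CPU fields, all flags clear
		TUint32 s = iEventState;
		do	{
			if (s >= 0x10000)
				return FALSE;		// events outstanding - leave state alone
			} while (!__e32_atomic_cas_ord32(&iEventState, &s, ns));
		return TRUE;				// parent flag cleared
		}
*/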
|

/** Check for concurrent tied events when a thread joins a group

If event count zero, atomically set the parent flag and return TRUE,
else return FALSE.

@return TRUE if the parent flag has been set
*/
__NAKED__ TBool NThreadBase::TiedEventJoinInterlock()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx+%0]" : : "i" _FOFF(NSchedulable,iEventState));
	asm("teji1: ");
	asm("cmp eax, 0x10000 ");	// EEventCountInc
	asm("jae short teji0 ");	// if count >=1, finish and return FALSE
	asm("mov edx, eax ");
	asm("or edx, 0x8000 ");		// set parent flag
	asm("lock cmpxchg [ecx+%0], edx" : : "i" _FOFF(NSchedulable,iEventState));
	asm("jne short teji1 ");
	asm("mov eax, 1 ");		// return TRUE
	THISCALL_EPILOG0()
	asm("teji0: ");
	asm("xor eax, eax ");		// return FALSE
	THISCALL_EPILOG0()
	}
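
/* Editor's sketch (illustrative, assumes __e32_atomic_cas_ord32):

	TBool NThreadBase::TiedEventJoinInterlock()
		{
		TUint32 s = iEventState;
		do	{
			if (s >= 0x10000)
				return FALSE;		// events outstanding - can't set flag
			} while (!__e32_atomic_cas_ord32(&iEventState, &s, s | 0x8000));
		return TRUE;				// EEventParent now set
		}
*/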
|

/** Decrement a fast semaphore count

If count > 0, decrement and do memory barrier
If count = 0, set equal to (thread>>2)|0x80000000
Return original count
*/
__NAKED__ TInt NFastSemaphore::Dec(NThreadBase*)
	{
	THISCALL_PROLOG1()
	asm("mov eax, [ecx]");
	asm("fsdec:");
	asm("mov edx, eax");
	asm("dec edx");
	asm("jns short fsdec1");	// count was > 0, just decrement
	asm("mov edx, [esp+4]");	// count was 0 - park the waiting thread pointer
	asm("shr edx, 2");
	asm("or edx, 0x80000000");
	asm("fsdec1:");
	asm("lock cmpxchg [ecx], edx");
	asm("jne short fsdec");
	THISCALL_EPILOG1()
	}
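
/* Editor's sketch of the loop above (illustrative; assumes
   __e32_atomic_cas_ord32 and a 4-byte-aligned NThreadBase*):

	TInt NFastSemaphore::Dec(NThreadBase* aThread)
		{
		TUint32 s = (TUint32)iCount, ns;
		do	{
			ns = s - 1;
			if (TInt(ns) < 0)	// count was 0: encode the waiting thread
				ns = ((TUint32)aThread >> 2) | 0x80000000u;
			} while (!__e32_atomic_cas_ord32(&iCount, &s, ns));
		return TInt(s);			// original count
		}
*/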
|
/** Increment a fast semaphore count

Do memory barrier
If iCount >= 0, increment by aCount and return 0
If iCount < 0, set count equal to aCount-1 and return (original count << 2)
*/
__NAKED__ NThreadBase* NFastSemaphore::Inc(TInt)
	{
	THISCALL_PROLOG1()
	asm("mov eax, [ecx]");
	asm("fsinc:");
	asm("mov edx, [esp+4]");
	asm("test eax, eax");
	asm("js short fsinc1");		// negative count = thread waiting
	asm("lea edx, [edx+eax+1]");	// new count = old count + aCount (+1 for dec below)
	asm("fsinc1:");
	asm("dec edx");
	asm("lock cmpxchg [ecx], edx");
	asm("jne short fsinc");
	asm("add eax, eax");		// CF = sign of original count
	asm("jc short fsinc2");
	asm("xor eax, eax");		// count was >= 0 - no thread to return
	asm("fsinc2:");
	asm("add eax, eax");		// recover thread pointer (original count << 2)
	THISCALL_EPILOG1()
	}
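
/* Editor's sketch (illustrative):

	NThreadBase* NFastSemaphore::Inc(TInt aCount)
		{
		TUint32 s = (TUint32)iCount, ns;
		do	{
			if (TInt(s) < 0)
				ns = TUint32(aCount - 1);	// waiter present: new count = aCount-1
			else
				ns = s + TUint32(aCount);	// no waiter: just add
			} while (!__e32_atomic_cas_ord32(&iCount, &s, ns));
		if (TInt(s) < 0)
			return (NThreadBase*)(s << 2);		// decode parked thread pointer
		return 0;
		}
*/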
|
/** Reset a fast semaphore count

Do memory barrier
If iCount >= 0, set iCount=0 and return 0
If iCount < 0, set iCount=0 and return (original count << 2)
*/
__NAKED__ NThreadBase* NFastSemaphore::DoReset()
	{
	THISCALL_PROLOG0()
	asm("xor eax, eax");
	asm("lock xchg eax, [ecx]");	// iCount = 0, eax = original count
	asm("add eax, eax");		// CF = sign of original count
	asm("jc short fsrst0");
	asm("xor eax, eax");		// count was >= 0 - return 0
	asm("fsrst0:");
	asm("add eax, eax");		// recover thread pointer (original count << 2)
	THISCALL_EPILOG0()
	}
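
/* Editor's sketch (illustrative; __e32_atomic_swp_ord32 is the e32 atomic
   exchange, assumed available):

	NThreadBase* NFastSemaphore::DoReset()
		{
		TUint32 s = __e32_atomic_swp_ord32(&iCount, 0);	// iCount = 0
		return (TInt(s) < 0) ? (NThreadBase*)(s << 2) : 0;
		}
*/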
|
/** Check whether a thread holds a fast mutex.
If so set the mutex contention flag and return TRUE, else return FALSE.

Called with kernel lock held

@internalComponent
*/
__NAKED__ TBool NThreadBase::CheckFastMutexDefer()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx+%0]" : : "i" _FOFF(NThreadBase, iHeldFastMutex));
	asm("mov edx, 0xfffffffc");
	asm("and edx, eax");		// edx points to mutex if any, eax bit 0 = flag
	asm("jnz short checkfmd1");
	asm("xor eax, eax");		// no mutex - return FALSE
	THISCALL_EPILOG0()

	// iHeldFastMutex points to a mutex
	asm("checkfmd1:");
	asm("test al, 1");
	asm("jz short checkfmd2");

	// mutex being released
	asm("mov eax, ecx");
	asm("inc ecx");
	asm("lock cmpxchg [edx], ecx");	// if m->iHoldingThread==this, set m->iHoldingThread = this+1 ...
	asm("jz short checkfmd3");	// ... and return TRUE
	asm("cmp eax, ecx");		// otherwise check if contention flag already set
	asm("jz short checkfmd3");	// if so return TRUE
	asm("xor eax, eax");
	asm("dec ecx");
	asm("mov [ecx+%0], eax" : : "i" _FOFF(NThreadBase, iHeldFastMutex));	// else already released, so set iHeldFastMutex=0
	THISCALL_EPILOG0()		// and return FALSE

	// mutex being acquired or has been acquired
	// if it has been acquired set the contention flag and return TRUE, else return FALSE
	asm("checkfmd2:");
	asm("mov eax, ecx");
	asm("inc ecx");
	asm("lock cmpxchg [edx], ecx");	// if m->iHoldingThread==this, set m->iHoldingThread = this+1
	asm("jz short checkfmd3");	// ... and return TRUE
	asm("cmp eax, ecx");		// otherwise check if contention flag already set
	asm("jz short checkfmd3");	// if so return TRUE
	asm("xor eax, eax");
	THISCALL_EPILOG0()		// else return FALSE

	asm("checkfmd3:");
	asm("mov eax, 1");		// return TRUE
	THISCALL_EPILOG0()
	}
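
/* Editor's sketch (illustrative; treats iHoldingThread as a word for the CAS,
   as the assembly does, and merges the two near-identical branches above):

	TBool NThreadBase::CheckFastMutexDefer()
		{
		TLinAddr w = (TLinAddr)iHeldFastMutex;
		NFastMutex* m = (NFastMutex*)(w & ~3u);	// bit 0 flags "being released"
		if (!m)
			return FALSE;			// no fast mutex involved
		TUint32 exp = (TUint32)this;
		if (__e32_atomic_cas_ord32(&m->iHoldingThread, &exp, exp + 1))
			return TRUE;			// we hold it - contention flag now set
		if (exp == (TUint32)this + 1)
			return TRUE;			// contention flag was already set
		if (w & 1)
			iHeldFastMutex = 0;		// release had completed - tidy up
		return FALSE;
		}
*/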
|

/** Transition the state of an IDFC or DFC when Add() is called

0000->008n, 00Cn->00En, all other states unchanged
Return original state.

Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::AddStateChange()
	{
	THISCALL_PROLOG0()
	asm("xor eax, eax ");
	asm("mov ax, [ecx+10] ");	// ax = DFC state word
	asm("ascr: ");
	asm("mov edx, eax ");
	asm("test eax, eax ");
	asm("jne short asc1 ");
	asm("str dx ");
	asm("shr dl, 3 ");		// dl = current CPU number + 5
	asm("add dl, 0x7b ");		// 0000->008n
	asm("jmp short asc0 ");
	asm("asc1: ");
	asm("cmp eax, 0xE0 ");
	asm("jae short asc0 ");		// if outside range 00C0-00DF leave alone
	asm("cmp eax, 0xC0 ");
	asm("jb short asc0 ");
	asm("add dl, 0x20 ");		// 00Cn->00En
	asm("asc0: ");
	asm("lock cmpxchg [ecx+10], dx ");
	asm("jne short ascr ");
	THISCALL_EPILOG0()
	}
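
/* All the TDfc state-change functions below share one pattern: a 16-bit CAS
   loop on the state word at offset 10 in TDfc. In the state names used in the
   comments, n is a CPU number, g is the scheduler idle generation, and the
   high byte XX is a mask of CPUs with a cancel in progress. Editor's sketch
   of the pattern (illustrative; iDfcState names the word at offset 10, and
   cas16() stands for a hypothetical 16-bit compare-and-swap that updates the
   expected value on failure):

	TUint32 TDfc::AddStateChange()
		{
		TUint16 s = iDfcState, ns;
		do	{
			if (s == 0)
				ns = TUint16(0x80 + NKern::CurrentCpu());	// 0000->008n
			else if (s >= 0xC0 && s < 0xE0)
				ns = TUint16(s + 0x20);				// 00Cn->00En
			else
				ns = s;						// leave alone
			} while (!cas16(&iDfcState, &s, ns));
		return s;	// original state
		}
*/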
|
/** Transition the state of an IDFC just before running it.

002g->00Cn, 008n->00Cn, 00An->00Cn, XXYY->XX00, XX00->0000
other initial states invalid
Return original state

Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::RunIDFCStateChange()
	{
	THISCALL_PROLOG0()
	asm("xor eax, eax ");
	asm("mov ax, [ecx+10] ");
	asm("risr: ");
	asm("cmp ah, 0 ");
	asm("jne short ris1 ");
	asm("mov edx, eax ");
	asm("and dl, 0xfe ");
	asm("cmp dl, 0x20 ");
	asm("je short ris2 ");		// 002g
	asm("mov edx, eax ");
	asm("cmp dl, 0xc0 ");
	asm("jge short ris_bad ");	// not 80-BF
	asm("and dl, 0x1f ");

	asm("push ebx ");
	asm("str bx ");
	asm("sub bl, 0x28 ");
	asm("shr bl, 3 ");		// bl = current CPU number
	asm("cmp bl, dl ");		// state's CPU field must be this CPU
	asm("pop ebx ");
	asm("jne short ris_bad ");

	asm("or dl, 0xc0 ");		// 008n->00Cn, 00An->00Cn
	asm("jmp short ris0 ");
	asm("ris_bad: ");
	asm("int 0xff ");		// DIE
	asm("ris2: ");
	asm("mov edx, eax ");
	asm("xor dl, 0x21 ");
	asm("cmp dl, [%a0]" : : "i" (&TheScheduler.iIdleGeneration));
	asm("jne short ris_bad ");
	asm("str dx ");
	asm("shr dl, 3 ");		// dl = current CPU number + 5
	asm("add dl, 0xbb ");		// 002g->00Cn
	asm("jmp short ris0 ");
	asm("ris1: ");
	asm("xor edx, edx ");
	asm("cmp al, 0 ");
	asm("je short ris0 ");		// XX00->0000
	asm("str dx ");
	asm("sub dl, 0x28 ");
	asm("shr dl, 3 ");
	asm("xor dl, al ");
	asm("and dl, 0x1f ");
	asm("jne short ris_bad ");	// CPU field must match current CPU
	asm("xor edx, edx ");
	asm("mov dh, ah ");		// XXYY->XX00
	asm("ris0: ");
	asm("lock cmpxchg [ecx+10], dx ");
	asm("jne short risr ");
	THISCALL_EPILOG0()
	}
|
/** Transition the state of an IDFC just after running it.

First swap aS->iCurrentIDFC with 0
If original value != this, return 0xFFFFFFFF and don't touch *this
Else 00Cn->0000, 00En->008n, 006n->006n, XXCn->XX00, XXEn->XX00, XX6n->XX00, XX00->0000
other initial states invalid
Return original state

Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::EndIDFCStateChange(TSubScheduler* /*aS*/)
	{
	THISCALL_PROLOG1()
	asm("mov edx, [esp+4] ");	// edx = aS
	asm("xor eax, eax ");
	asm("lock xchg eax, [edx+%0]" : : "i" _FOFF(TSubScheduler,iCurrentIDFC));	// swap aS->iCurrentIDFC with 0
	asm("xor eax, ecx ");		// if aS->iCurrentIDFC==this originally, eax=0
	asm("jne short eis9 ");		// else bail out
	asm("mov ax, [ecx+10] ");
	asm("eisr: ");
	asm("xor edx, edx ");
	asm("cmp al, 0 ");
	asm("je short eis0 ");		// XX00->0000
	asm("cmp al, 0x60 ");
	asm("jb short eis_bad ");	// bad if < 60
	asm("cmp al, 0xC0 ");
	asm("jl short eis_bad ");	// bad if 80-BF
	asm("str dx ");
	asm("sub dl, 0x28 ");
	asm("shr dl, 3 ");
	asm("xor dl, al ");
	asm("and dl, 0x1f ");
	asm("jne short eis_bad ");	// CPU field must match current CPU
	asm("xor edx, edx ");
	asm("cmp ah, 0 ");
	asm("je short eis1 ");
	asm("mov dh, ah ");		// XX6n->XX00, XXCn->XX00, XXEn->XX00
	asm("jmp short eis0 ");
	asm("eis1: ");
	asm("cmp al, 0xE0 ");
	asm("jl short eis0 ");		// 00Cn->0000
	asm("mov dl, al ");
	asm("jb short eis0 ");		// 006n->006n
	asm("sub dl, 0x60 ");		// 00En->008n
	asm("eis0: ");
	asm("lock cmpxchg [ecx+10], dx ");
	asm("jne short eisr ");
	THISCALL_EPILOG1()
	asm("eis9: ");
	asm("mov eax, 0xffffffff ");	// aS->iCurrentIDFC != this
	THISCALL_EPILOG1()
	asm("eis_bad: ");
	asm("int 0xff ");
	}
|
/** Transition the state of an IDFC just after running it.

006n->002g where g = TheScheduler.iIdleGeneration
XX6n->XX00
other initial states invalid
Return original state

Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::EndIDFCStateChange2()
	{
	THISCALL_PROLOG0()
	asm("xor eax, eax ");
	asm("mov ax, [ecx+10] ");
	asm("eis2r: ");
	asm("xor edx, edx ");
	asm("cmp al, 0x60 ");
	asm("jl short eis2_bad ");	// if not 006n or XX6n, invalid
	asm("str dx ");
	asm("sub dl, 0x28 ");
	asm("shr dl, 3 ");
	asm("xor dl, al ");
	asm("and dl, 0x1f ");
	asm("jne short eis2_bad ");	// CPU field must match current CPU
	asm("xor edx, edx ");
	asm("or dh, ah ");
	asm("jne short eis20 ");	// XX6n->XX00
	asm("mov edx, 0x20 ");
	asm("or dl, [%a0]" : : "i" (&TheScheduler.iIdleGeneration));	// 006n->002g
	asm("eis20: ");
	asm("lock cmpxchg [ecx+10], dx ");
	asm("jne short eis2r ");
	THISCALL_EPILOG0()
	asm("eis2_bad: ");
	asm("int 0xff ");
	}
|
/** Transition the state of a DFC just before moving it from the IDFC queue to
its final queue.

002g->0001, 008n->0001, XX2g->XX00, XX8n->XX00, XX00->0000
other initial states invalid
Return original state
*/
__NAKED__ TUint32 TDfc::MoveToFinalQStateChange()
	{
	THISCALL_PROLOG0()
	asm("xor eax, eax ");
	asm("mov ax, [ecx+10] ");
	asm("mfqr: ");
	asm("xor edx, edx ");
	asm("cmp al, 0xa0 ");
	asm("jl short mfq1a ");		// 80-9F ok
	asm("cmp al, 0x20 ");
	asm("je short mfq1 ");		// 20 ok
	asm("cmp al, 0x21 ");
	asm("je short mfq1 ");		// 21 ok
	asm("cmp eax, 0 ");
	asm("je short mfq_bad ");	// 0000 -> bad
	asm("cmp al, 0 ");		// XX00 ok
	asm("je short mfq0 ");		// XX00->0000
	asm("jmp short mfq_bad ");	// not 002g, 008n, XX2g, XX8n, XX00
	asm("mfq1a: ");
	asm("str dx ");
	asm("sub dl, 0x28 ");
	asm("shr dl, 3 ");
	asm("xor dl, al ");
	asm("and dl, 0x1f ");
	asm("jne short mfq_bad ");	// CPU field must match current CPU
	asm("xor edx, edx ");
	asm("mfq1: ");
	asm("cmp ah, 0 ");
	asm("jne short mfq2 ");
	asm("mov dl, 1 ");
	asm("jmp short mfq0 ");		// 002g->0001, 008n->0001
	asm("mfq2: ");
	asm("mov dh, ah ");		// XXYY->XX00
	asm("mfq0: ");
	asm("lock cmpxchg [ecx+10], dx ");
	asm("jne short mfqr ");
	THISCALL_EPILOG0()
	asm("mfq_bad: ");
	asm("int 0xff ");
	}
|
/** Transition the state of an IDFC when transferring it to another CPU

002g->00Am, 008n->00Am, XXYY->XX00, XX00->0000
other initial states invalid
Return original state

Enter and return with interrupts disabled and target CPU's ExIDfcLock held.
*/
__NAKED__ TUint32 TDfc::TransferIDFCStateChange(TInt /*aCpu*/)
	{
	THISCALL_PROLOG1()
	asm("xor eax, eax ");
	asm("mov ax, [ecx+10] ");
	asm("tisr: ");
	asm("xor edx, edx ");
	asm("cmp al, 0xa0 ");
	asm("jl short tis1a ");		// 80-9F ok
	asm("cmp al, 0x20 ");
	asm("je short tis1 ");		// 20 ok
	asm("cmp al, 0x21 ");
	asm("je short tis1 ");		// 21 ok
	asm("jne short tis_bad ");	// not 002g or 008n -> bad
	asm("tis1a: ");
	asm("str dx ");
	asm("sub dl, 0x28 ");
	asm("shr dl, 3 ");
	asm("xor dl, al ");
	asm("and dl, 0x1f ");
	asm("jne short tis_bad ");	// CPU field must match current CPU
	asm("xor edx, edx ");
	asm("tis1: ");
	asm("cmp ah, 0 ");
	asm("jne short tis2 ");
	asm("mov dl, [esp+4] ");	// dl = target CPU number
	asm("or dl, 0xA0 ");
	asm("jmp short tis0 ");		// 002g->00Am, 008n->00Am
	asm("tis2: ");
	asm("cmp al, 0 ");
	asm("je short tis0 ");		// XX00->0000
	asm("mov dh, ah ");		// XXYY->XX00
	asm("tis0: ");
	asm("lock cmpxchg [ecx+10], dx ");
	asm("jne short tisr ");
	THISCALL_EPILOG1()
	asm("tis_bad: ");
	asm("int 0xff ");
	}
|
/** Transition the state of an IDFC/DFC just before cancelling it.

0000->0000, XX00->ZZ00, xxYY->zzYY
Return original state

Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::CancelInitialStateChange()
	{
	THISCALL_PROLOG0()
	asm("push ebx ");
	asm("str bx ");
	asm("shr bl, 3 ");
	asm("add bl, 3 ");		// bl = current cpu number + 8
	asm("xor eax, eax ");
	asm("mov ax, [ecx+10] ");
	asm("cisr: ");
	asm("mov edx, eax ");
	asm("test eax, eax ");
	asm("je short cis0 ");		// 0000->0000
	asm("bts edx, ebx ");		// XX00->ZZ00, xxYY->zzYY
	asm("cis0: ");
	asm("lock cmpxchg [ecx+10], dx ");
	asm("jne short cisr ");
	asm("pop ebx ");
	THISCALL_EPILOG0()
	}
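
// Editor's note (derived from the code above, not original commentary): the
// "bts edx, ebx" sets bit (8 + current CPU) in the state word, i.e. ZZ/zz is
// XX/xx with this CPU's bit added to the high-byte mask of CPUs that have a
// cancel in progress; the other state-change functions preserve that mask
// (XXYY->XX00) until CancelFinalStateChange clears it.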
|
/** Transition the state of an IDFC/DFC at the end of a cancel operation

XXYY->XX00, XX00->0000
Return original state

Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::CancelFinalStateChange()
	{
	THISCALL_PROLOG0()
	asm("xor eax, eax ");
	asm("mov ax, [ecx+10] ");
	asm("cfsr: ");
	asm("xor edx, edx ");
	asm("cmp al, 0 ");
	asm("je short cfs0 ");		// XX00->0000
	asm("mov dh, ah ");		// XXYY->XX00
	asm("cfs0: ");
	asm("lock cmpxchg [ecx+10], dx ");
	asm("jne short cfsr ");
	THISCALL_EPILOG0()
	}
|
/** Transition the state of an IDFC or DFC when QueueOnIdle() is called

0000->002g where g = TheScheduler.iIdleGeneration,
00Cn->006n, all other states unchanged
Return original state.

Enter and return with interrupts disabled and IdleSpinLock held.
*/
__NAKED__ TUint32 TDfc::QueueOnIdleStateChange()
	{
	THISCALL_PROLOG0()
	asm("xor eax, eax ");
	asm("mov ax, [ecx+10] ");
	asm("qisr: ");
	asm("mov edx, eax ");
	asm("test eax, eax ");
	asm("jne short qis1 ");
	asm("mov edx, 0x20 ");
	asm("or dl, [%a0]" : : "i" (&TheScheduler.iIdleGeneration));	// 0000->002g
	asm("jmp short qis0 ");
	asm("qis1: ");
	asm("cmp eax, 0xE0 ");
	asm("jae short qis0 ");		// if outside range 00C0-00DF leave alone
	asm("cmp eax, 0xC0 ");
	asm("jb short qis0 ");
	asm("sub dl, 0x60 ");		// 00Cn->006n
	asm("qis0: ");
	asm("lock cmpxchg [ecx+10], dx ");
	asm("jne short qisr ");
	THISCALL_EPILOG0()
	}
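
/* Editor's sketch of the transition above (illustrative; cas16() and
   iDfcState as in the sketch after AddStateChange):

	TUint32 TDfc::QueueOnIdleStateChange()
		{
		TUint16 s = iDfcState, ns;
		do	{
			if (s == 0)
				ns = TUint16(0x20 | TheScheduler.iIdleGeneration);	// 0000->002g
			else if (s >= 0xC0 && s < 0xE0)
				ns = TUint16(s - 0x60);					// 00Cn->006n
			else
				ns = s;							// leave alone
			} while (!cas16(&iDfcState, &s, ns));
		return s;	// original state
		}
*/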
|

__NAKED__ void TDfc::ResetState()
	{
	THISCALL_PROLOG0()
	asm("xor eax, eax ");
	asm("lock xchg ax, [ecx+10] ");	// state = 0000, ax = original state
	asm("cmp eax, 0 ");
	asm("je short rst_bad ");	// state was already 0000 - die
	THISCALL_EPILOG0()
	asm("rst_bad: ");
	asm("int 0xf8 ");
	}