// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\x86\ncsched.cia
//
//

#include <x86.h>
#include <apic.h>

// SubSchedulerLookupTable : global data, type: TSubScheduler* [256]
// BTraceLock : global data, type: TSpinLock

const TLinAddr TScheduler_Reschedule = (TLinAddr)&TScheduler::Reschedule;
//const TLinAddr TheScheduler_iRescheduleNeededFlag = (TLinAddr)&TheScheduler.iRescheduleNeededFlag;
const TLinAddr NKern_FastCounter = (TLinAddr)&NKern::FastCounter;
const TLinAddr NKern_Lock = (TLinAddr)&NKern::Lock;
const TLinAddr NKern_Unlock = (TLinAddr)&NKern::Unlock;
const TLinAddr addressof_TheScheduler = (TLinAddr)&TheScheduler;
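// The constant below pre-packs the four header bytes of a BTrace record for a
// CPU-usage "new thread context" event: record size, flags (context ID
// present), category and subcategory, each shifted to its byte position.
// Field-by-field sketch (assuming the standard BTrace index values
// ESizeIndex=0, EFlagsIndex=1, ECategoryIndex=2, ESubCategoryIndex=3):
//	byte 0: 8 (record length in bytes)
//	byte 1: BTrace::EContextIdPresent
//	byte 2: BTrace::ECpuUsage
//	byte 3: BTrace::ENewThreadContext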
const TUint32 new_thread_trace_header = ((8<<BTrace::ESizeIndex) + (BTrace::EContextIdPresent<<BTrace::EFlagsIndex*8) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::ENewThreadContext<<BTrace::ESubCategoryIndex*8));

extern "C" void __fastcall queue_dfcs(TSubScheduler* aS);
extern "C" NThreadBase* __fastcall select_next_thread(TSubScheduler* aS);
extern "C" void send_resched_ipis(TUint32 aMask);
extern "C" void __fastcall do_forced_exit(NThreadBase* aT);
extern "C" void NewThreadTrace(NThread* a);


/***************************************************************************
 * Reschedule
 * Enter with:
 *		Kernel locked, interrupts enabled or disabled
 * Return with:
 *		Kernel unlocked, interrupts disabled
 *		EAX=0 if no reschedule occurred, 1 if it did
 *		ESI pointing to TSubScheduler for current CPU
 *		EDI pointing to current NThread
 ***************************************************************************/
__NAKED__ void TScheduler::Reschedule()
	{
	asm("push 0 ");	// reserve the return-value slot (0 = no reschedule yet)
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));	// OK since kernel locked
	asm("mov edi, %0" : : "i" (addressof_TheScheduler));
	asm("shr eax, 24 ");
	asm("mov esi, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cli ");
	asm("start_resched: ");
//	_asm cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 10000h	(VC6 ignores the "dword ptr")
	asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp dword ptr [eax], 0x10000 ");	// IDFCs pending?
	asm("jb short resched_no_dfcs ");
	asm("mov ecx, esi ");
	asm("call %a0" : : "i" (&queue_dfcs));
	asm("resched_no_dfcs: ");
	asm("cmp byte ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("jz resched_not_needed ");
	asm("sti ");
	asm("mov dword ptr [esp], 1 ");	// a reschedule is occurring - set the return-value slot
	asm("mov ebp, [esi+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));	// EBP -> original thread
	asm("mov eax, cr0");	// save CR0 (including the TS flag) across the switch
	asm("push eax");
	asm("mov [ebp+%0], esp" : : "i" _FOFF(NThreadBase, iSavedSP));	// save original thread's stack pointer

	// We must move to a temporary stack before selecting the next thread.
	// This is because another CPU may begin executing this thread before the
	// select_next_thread() function returns and our stack would then be
	// corrupted. We use the stack belonging to this CPU's initial thread since
	// we are guaranteed that will never run on another CPU.
	asm("mov ecx, [esi+%0]" : : "i" _FOFF(TSubScheduler, iInitialThread));
	asm("mov esp, [ecx+%0]" : : "i" _FOFF(NThreadBase, iSavedSP));

	asm("select_thread:");
	asm("mov ecx, esi ");
	asm("call %a0" : : "i" (&select_next_thread));
	asm("mov ebx, eax ");
	asm("cmp ebx, 0 ");
	asm("jz no_thread ");
	asm("mov esp, [ebx+%0]" : : "i" _FOFF(NThreadBase, iSavedSP));	// move to new thread's stack

#ifdef BTRACE_CPU_USAGE
	asm("cmp byte ptr %a0, 0" : : "i" (&BTraceData.iFilter[4]));	// is the ECpuUsage trace category enabled?
	asm("jz short no_trace ");
	asm("push ebx ");
	asm("call %a0" : : "i" (NewThreadTrace));
	asm("pop ebx ");
	asm("no_trace: ");
#endif	// BTRACE_CPU_USAGE

	asm("cmp ebp, ebx ");
	asm("je same_thread ");
	asm("mov eax, [ebx+%0]" : : "i" _FOFF(NThreadBase, iStackBase));
	asm("add eax, [ebx+%0]" : : "i" _FOFF(NThreadBase, iStackSize));
	asm("mov ecx, [esi+60+%0]" : : "i" _FOFF(TSubScheduler, iExtras));	// iExtras[15] points to TSS
	asm("mov [ecx+%0], eax" : : "i" _FOFF(TX86Tss, iEsp0));	// set ESP0 to top of new thread's supervisor stack

	asm("test byte ptr [ebx+%0], 2" : : "i" _FOFF(NThreadBase, i_ThrdAttr));	// test for address space switch
	asm("jz short resched_no_as_switch ");
	asm("call [edi+%0]" : : "i" _FOFF(TScheduler, iProcessHandler));	// call handler with
							// EBX=pointer to new thread, EDI->scheduler, ESI->subscheduler
	asm("resched_no_as_switch: ");
	asm("same_thread: ");
	asm("pop eax ");	// restore the saved CR0
	asm("mov cr0, eax ");
	asm("cli ");
//	asm("cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 0")	(VC6 ignores the "dword ptr")
	asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp dword ptr [eax], 0 ");
	asm("jnz start_resched ");

	asm("resched_not_needed: ");
	asm("mov edi, [esi+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));
	asm("cmp dword ptr [edi+%0], -3" : : "i" _FOFF(NThreadBase, iCsFunction));	// ECSDivertPending
	asm("je resched_thread_divert ");
	asm("mov dword ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("pop eax ");
	asm("ret ");

	asm("resched_thread_divert: ");
	asm("push edi ");
	asm("xor eax, eax ");	// zero EAX so the XCHG atomically fetches and clears the pending IPI mask
	asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
	asm("test eax, eax ");
	asm("jz short no_resched_ipis ");
	asm("push eax ");
	asm("call %a0" : : "i" (&send_resched_ipis));
	asm("add esp, 4 ");
	asm("no_resched_ipis: ");

	asm("sti ");
	asm("mov ecx, [esp+12] ");	// SThreadReschedStack::iReason (0 = not run, 1 = unlock, 2 = IRQ)
	asm("cmp ecx, 2 ");
	asm("ja short rtd_unknown ");	// unknown reason - die
	asm("shl ecx, 2 ");	// reason * 4
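	// The next four instructions decode a lookup table packed four bits per
	// entry into the immediate 0xa1a: entry 0 (not run) = 0xa, entry 1
	// (unlock) = 1, entry 2 (IRQ) = 0xa. The selected nibble appears to be the
	// dword offset (relative to ESP+16) at which that entry path saved GS.
	// Worked example: reason 1 -> CL = 4 -> 0xa1a >> 4 = 0xa1 -> & 15 = 1.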
	asm("mov eax, 0xa1a ");	// packed nibble lookup table indexed by reason
	asm("shr eax, cl ");
	asm("and eax, 15 ");	// EAX = dword index of the saved GS value for this entry path
	asm("mov gs, [esp+eax*4+16] ");	// restore GS

	asm("pop ecx ");	// exiting thread pointer
	asm("call %a0" : : "i" (&do_forced_exit));
	asm("int 0xff ");	// should never get here

	asm("rtd_unknown: ");
	asm("int 0xff ");	// should never get here


	// There is no thread ready to run
	asm("no_thread: ");
	asm("cli ");
	asm("xor eax, eax ");
	asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
	asm("test eax, eax ");
	asm("jz short no_resched_ipis2 ");
	asm("push eax ");
	asm("call %a0" : : "i" (&send_resched_ipis));
	asm("add esp, 4 ");
	asm("no_resched_ipis2: ");
	asm("sti ");
	asm("hlt ");
	asm("no_thread2: ");
//	_asm cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 10000h	(VC6 ignores the "dword ptr")
	asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp dword ptr [eax], 0x10000 ");
	asm("jb short no_thread ");
	asm("mov ecx, esi ");
	asm("call %a0" : : "i" (&queue_dfcs));
	asm("cmp byte ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("jz short no_thread2 ");
	asm("jmp select_thread ");
	}


/** Disable interrupts to the specified level

	If aLevel == 0, the interrupt state is unaffected.
	If aLevel != 0, all maskable interrupts are disabled.

	@param aLevel Level to which to disable interrupts
	@return Cookie to pass into RestoreInterrupts()
*/
EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/)
	{
	asm("pushfd");	// save EFLAGS
	asm("mov ecx, [esp+8]");	// ECX = aLevel (stack: saved EFLAGS, return address, aLevel)
	asm("pop eax");
	asm("and eax, 0x200");	// cookie = IF bit of EFLAGS on entry
	asm("test ecx, ecx");
	asm("jz disable_ints_0");	// aLevel == 0: leave interrupt state alone
	asm("cli");	// aLevel != 0: disable all maskable interrupts
	asm("disable_ints_0:");
	asm("ret");
	}
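
// Usage sketch (illustrative only): pair DisableInterrupts() or
// DisableAllInterrupts() with RestoreInterrupts(), passing back the returned
// cookie so the previous interrupt state is reinstated. 'irq' is a
// hypothetical local variable.
//
//	TInt irq = NKern::DisableAllInterrupts();
//	// ... code that must run with interrupts off ...
//	NKern::RestoreInterrupts(irq);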


/** Disable all maskable interrupts

	@return Cookie to pass into RestoreInterrupts()
*/
EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts()
	{
	asm("pushfd");
	asm("pop eax");
	asm("and eax, 0x200");
	asm("cli");
	asm("ret");
	}


/** Restore interrupt mask to state preceding a DisableInterrupts() call

	@param aLevel Cookie returned by Disable(All)Interrupts()
*/
EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt aLevel)
	{
	asm("test byte ptr [esp+5], 2");	// test saved I flag (bit 9 of EFLAGS)
	asm("jz restore_irq_off");	// jump if clear
	asm("sti");	// else reenable interrupts
	asm("ret");
	asm("restore_irq_off:");
	asm("cli");
	asm("ret");
	}


/** Enable all maskable interrupts

	@internalComponent
*/
EXPORT_C __NAKED__ void NKern::EnableAllInterrupts()
	{
	asm("sti");
	asm("ret");
	}


/** Unlocks the kernel

	Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are
	pending, calls the scheduler to process them.

	@pre Thread or IDFC context. Don't call from ISRs.
*/
EXPORT_C __NAKED__ void NKern::Unlock()
	{
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));	// OK since kernel locked
	asm("shr eax, 24 ");
	asm("push esi ");
	asm("mov esi, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
#ifdef _DEBUG
	asm("cmp dword ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jg short _dbg1 ");
	asm("int 0xff ");	// lock count <= 0 : die
	asm("_dbg1: ");
#endif
	asm("cli ");
	asm("dec dword ptr [esi+%0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short still_locked ");
//	asm("cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 0")	(VC6 ignores the "dword ptr")
	asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp dword ptr [eax], 0 ");
	asm("jz short no_resched ");

	asm("mov dword ptr [esi+%0], 1" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("push edi ");
	asm("push ebp ");
	asm("push ebx ");
	asm("push gs ");
	asm("push fs ");
	asm("sti ");

	// Reschedule - return with local interrupts disabled, iKernLockCount=0
	asm("push 1 ");
	asm("call %a0" : : "i" (TScheduler_Reschedule));
	asm("add esp, 4 ");

	asm("xor eax, eax ");
	asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
	asm("test eax, eax ");
	asm("jz short no_resched_ipis_ul ");

	asm("unlock_do_resched_ipis: ");
	asm("push eax ");
	asm("call %a0" : : "i" (&send_resched_ipis));
	asm("add esp, 4 ");

	asm("no_resched_ipis_ul: ");
	asm("pop fs ");
	asm("pop gs ");
	asm("pop ebx ");
	asm("pop ebp ");
	asm("pop edi ");

	asm("still_locked: ");
	asm("sti ");
	asm("pop esi ");
	asm("ret ");

	asm("no_resched: ");
	asm("xor eax, eax ");
	asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
	asm("test eax, eax ");
	asm("jz short still_locked ");
	asm("push edi ");
	asm("push ebp ");
	asm("push ebx ");
	asm("push gs ");
	asm("push fs ");
	asm("jmp short unlock_do_resched_ipis ");
	}


/** Locks the kernel

	Defers IDFCs and preemption.

	@pre Thread or IDFC context. Don't call from ISRs.
*/
EXPORT_C __NAKED__ void NKern::Lock()
	{
	asm("cli");	// stop thread migration between reading APIC ID and subscheduler stuff
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov ecx, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("inc dword ptr [ecx+%0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("sti");
	asm("ret");
	}
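
// Usage sketch (illustrative only): NKern::Lock() and NKern::Unlock() bracket
// code that must not be preempted and must not run concurrently with IDFCs.
//
//	NKern::Lock();
//	// ... preemption-sensitive work ...
//	NKern::Unlock();	// runs the scheduler if IDFCs/reschedule became pending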


/** Locks the kernel and returns a pointer to the current thread

	Defers IDFCs and preemption.

	@pre Thread or IDFC context. Don't call from ISRs.
*/
EXPORT_C __NAKED__ NThread* NKern::LockC()
	{
	asm("cli");	// stop thread migration between reading APIC ID and subscheduler stuff
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov ecx, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("inc dword ptr [ecx+%0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("mov eax, [ecx+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));
	asm("sti");
	asm("ret");
	}


/** Allows IDFCs and rescheduling if they are pending.

	If IDFCs or a reschedule are pending and iKernCSLocked is exactly 1, calls
	the scheduler to process the IDFCs and possibly reschedule.

	@return Nonzero if a reschedule actually occurred, zero if not.
	@pre Thread or IDFC context. Don't call from ISRs.
*/
EXPORT_C __NAKED__ TInt NKern::PreemptionPoint()
	{
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov ecx, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
#ifdef _DEBUG
	asm("cmp dword ptr [ecx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jg _dbg1_pp");
	asm("int 0xff");	// lock count <= 0 : die
	asm("_dbg1_pp:");
#endif
	asm("cmp dword ptr [ecx+%0], 1" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz still_locked_pp");
//	asm("cmp dword ptr [ecx]TSubScheduler.iRescheduleNeededFlag, 0")	(VC6 ignores the "dword ptr")
	asm("lea eax, [ecx+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp dword ptr [eax], 0");
	asm("jnz do_resched");
	asm("cli");
	asm("xor eax, eax");	// zero EAX so the XCHG atomically fetches and clears the pending IPI mask
	asm("lock xchg eax, [ecx+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
	asm("test eax, eax");
	asm("jz pp_no_resched_ipis");
	asm("push eax");
	asm("call %a0" : : "i" (&send_resched_ipis));
	asm("add esp, 4");
	asm("pp_no_resched_ipis:");
	asm("sti");

	asm("still_locked_pp:");
	asm("xor eax, eax");
	asm("ret");

	asm("do_resched:");
	asm("call %a0" : : "i" (NKern_Unlock));	// let the scheduler run
	asm("call %a0" : : "i" (NKern_Lock));
	asm("mov eax, 1");
	asm("ret");
	}
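
// Usage sketch (illustrative only; DoChunkOfWork() is hypothetical): a
// long-running kernel-locked operation can call PreemptionPoint() between
// chunks so pending IDFCs and reschedules are not deferred for too long.
//
//	NKern::Lock();
//	while (DoChunkOfWork())
//		NKern::PreemptionPoint();
//	NKern::Unlock();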


/** Complete the saving of a thread's context

	This saves the FPU registers if necessary once we know that we are definitely
	switching threads.

	@internalComponent
*/
__NAKED__ void NThread::CompleteContextSave()
	{
	THISCALL_PROLOG0()
	asm("mov edx, [ecx+%0]" : : "i" _FOFF(NThreadBase, iSavedSP));	// EDX points to saved state on thread stack
	asm("test byte ptr [edx], 8");	// test thread's saved TS flag
	asm("jnz no_fpu");	// if set, thread did not use FPU
	asm("clts");
	asm("fnsave [ecx+%0]" : : "i" _FOFF(NThread, iCoprocessorState));	// else thread did use FPU - save its state
	asm("or byte ptr [edx], 8");	// set TS flag so thread aborts next time it uses FPU
	asm("fwait");

	asm("no_fpu:");
	THISCALL_EPILOG0()
	}


/** Check if the kernel is locked the specified number of times.

	@param aCount	The number of times the kernel should be locked.
					If zero, tests if it is locked at all.
	@return TRUE if the tested condition is true.

	@internalTechnology
*/
EXPORT_C __NAKED__ TBool NKern::KernelLocked(TInt /*aCount*/)
	{
	asm("pushfd");
	asm("cli");	// stop thread migration while reading this CPU's lock count
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov eax, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("mov edx, [eax+%0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("popfd");
	asm("cmp edx, 0");
	asm("jz not_locked");	// not locked at all
	asm("mov eax, [esp+4]");	// EAX = aCount
	asm("cmp eax, 0");
	asm("jz locked");	// aCount == 0: any nonzero lock count will do
	asm("cmp eax, edx");
	asm("jnz not_locked");	// locked, but not exactly aCount times
	asm("locked:");
	asm("mov eax, 1");
	asm("ret");
	asm("not_locked:");
	asm("xor eax, eax");
	asm("ret");
	}
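
// Usage sketch (illustrative only; __NK_ASSERT_DEBUG is assumed to be the
// nkern debug assertion macro): assert that the caller holds the kernel lock
// exactly once.
//
//	__NK_ASSERT_DEBUG(NKern::KernelLocked(1));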


// Only call this if thread migration is disabled, i.e.
// interrupts disabled, kernel locked or current thread in 'freeze cpu' mode
extern "C" __NAKED__ TSubScheduler& SubScheduler()
	{
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov eax, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("ret");
	}

/** Returns the NThread control block for the currently scheduled thread.

	Note that this is the calling thread if called from a thread context, or the
	interrupted thread if called from an interrupt context.

	@return A pointer to the NThread for the currently scheduled thread.

	@pre Call in any context.
*/
EXPORT_C __NAKED__ NThread* NKern::CurrentThread()
	{
	asm("pushfd");
	asm("cli");	// stop thread migration between reading APIC ID and thread pointer
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov eax, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp eax, 0");
	asm("jz done");	// no subscheduler yet - return NULL
	asm("test al, 3");
	asm("jnz bad_ct");	// entry is not a valid pointer yet
	asm("mov eax, [eax+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));
	asm("done:");
	asm("popfd");
	asm("ret");
	asm("bad_ct:");
	asm("popfd");
	asm("xor eax, eax");	// return NULL during early boot
	asm("ret");
	}


/** Returns the NThread control block for the currently scheduled thread.

	Note that this is the calling thread if called from a thread context, or the
	interrupted thread if called from an interrupt context.

	@return A pointer to the NThread for the currently scheduled thread.

	@pre Call with migration disabled - i.e. from an ISR, IDFC, with interrupts
	disabled or with preemption disabled.
*/
extern "C" __NAKED__ NThread* NCurrentThreadL()
	{
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov eax, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("mov eax, [eax+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));
	asm("ret");
	}


/** Returns the CPU number of the calling CPU.

	@return the CPU number of the calling CPU.

	@pre Call in any context.
*/
EXPORT_C __NAKED__ TInt NKern::CurrentCpu()
	{
	asm("xor eax, eax");
	asm("str ax");	// read the task register (this CPU's TSS selector)
	asm("sub al, 0x28");	// per-CPU TSS selectors are allocated contiguously from 0x28
	asm("shr al, 3");	// 8 bytes per GDT entry, so CPU number = (TR - 0x28) / 8
	asm("ret");
	}


/** Return the current processor context type (thread, IDFC or interrupt)

	@return A value from NKern::TContext enumeration (but never EEscaped)
	@pre Any context

	@see NKern::TContext
*/
EXPORT_C __NAKED__ TInt NKern::CurrentContext()
	{
	asm("pushfd");
	asm("cli");	// stop thread migration between reading APIC ID and subscheduler stuff
	asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("xor eax, eax");
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, eax");
	asm("jz bad_cc");	// no subscheduler pointer yet
	asm("test dl, 3");
	asm("jnz bad_cc");	// entry is not a valid pointer yet
	asm("cmp eax, [edx+52+%0]" : : "i" _FOFF(TSubScheduler, iExtras));	// i_IrqNestCount >= 0 means in an ISR
	asm("jle irq");
	asm("cmp al, [edx+%0]" : : "i" _FOFF(TSubScheduler, iInIDFC));
	asm("jz thread");
	asm("jmp idfc");

	asm("bad_cc:");	// no subscheduler yet (initialising) - return EInterrupt
	asm("irq:");	// return NKern::EInterrupt [=2]
	asm("inc eax");
	asm("idfc:");	// return NKern::EIDFC [=1]
	asm("inc eax");
	asm("thread:");	// return NKern::EThread [=0]
	asm("popfd");
	asm("ret");
	}
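
// Usage sketch (illustrative only): code that is unsafe in an ISR can test the
// current context first.
//
//	if (NKern::CurrentContext() == NKern::EInterrupt)
//		return;	// called from an ISR - defer the work to an IDFC instead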


#ifdef __USE_LOGICAL_DEST_MODE__
extern "C" __NAKED__ void __fastcall do_send_resched_ipis(TUint32)
	{
	asm("shl ecx, 24 ");	// CPUs mask into bits 24-31
	asm("jz short sri0 ");	// no CPUs, so nothing to do
	asm("pushfd ");
	asm("cli ");
	asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
	asm("mov eax, %0" : : "i" (RESCHED_IPI_VECTOR | 0x4800));	// fixed delivery, logical destination, assert
	asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
	asm("popfd ");
	asm("sri0: ");
	asm("ret ");
	}
#endif

extern "C" __NAKED__ void __fastcall send_ipi(TUint32)
	{
	asm("pushfd ");
	asm("cli ");
	asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));	// ECX = destination APIC ID in bits 24-31
	asm("mov eax, %0" : : "i" (RESCHED_IPI_VECTOR | 0x4000));	// fixed delivery, physical destination, assert
	asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
	asm("popfd ");
	asm("ret ");
	}

// Send a reschedule IPI to the current processor
// *** DON'T DO ANY TRACING OR INSTRUMENTATION ***
extern "C" __NAKED__ void send_self_resched_ipi()
	{
	asm("pushfd ");
	asm("cli ");
	asm("xor ecx, ecx ");
	asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
	asm("mov eax, %0" : : "i" (RESCHED_IPI_VECTOR | 0x44000));	// destination shorthand = self
	asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
	asm("popfd ");
	asm("ret ");
	}

extern "C" __NAKED__ void send_irq_ipi(TSubScheduler*)
	{
	asm("mov ecx, [esp+4] ");	// ECX = subscheduler of target CPU
	asm("pushfd ");
	asm("mov edx, [ecx+%0]" : : "i" _FOFF(TSubScheduler, i_APICID));
	asm("cli ");
	asm("mov ds:[%0], edx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
	asm("mov eax, %0" : : "i" (TRANSFERRED_IRQ_VECTOR | 0x4000));	// fixed delivery, physical destination, assert
	asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
	asm("popfd ");
	asm("ret ");
	}