// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\win32\ncsched.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#include <e32cmn.h>
#include <e32cmn_private.h>
#include "nk_priv.h"
#include <emulator.h>

#ifdef __EMI_SUPPORT__
extern void EMI_AddTaskSwitchEvent(TAny* aPrevious, TAny* aNext);
extern void EMI_CheckDfcTag(TAny* aNext);
#endif
typedef void (*ProcessHandler)(TAny* aAddressSpace);
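// Note (added for clarity; reading of the code below, not an authoritative API
// statement): TheScheduler.iProcessHandler appears to be stored as an untyped
// address, so the function-style casts later in this file, e.g.
//     (*ProcessHandler(TheScheduler.iProcessHandler))(me.iAddressSpace);
// convert it back to this signature before invoking it on an address-space switch.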

static NThreadBase* SelectThread(TScheduler& aS)
//
// Select the next thread to run.
// This is the heart of the rescheduling algorithm.
// This should be essentially the same as the EPOC32 version!
//
    {
    NThreadBase* t = static_cast<NThreadBase*>(aS.First());

#ifdef _DEBUG
    __NK_ASSERT_DEBUG(t);
    if (t->iHeldFastMutex)
        {
        __KTRACE_OPT(KSCHED2, DEBUGPRINT("Resched init->%T, Holding %M", t, t->iHeldFastMutex));
        }
    else
        {
        __KTRACE_OPT(KSCHED2, DEBUGPRINT("Resched init->%T", t));
        }
#endif  // _DEBUG

    if (t->iTime == 0 && !t->Alone())
        {
        // round robin
        // get here if the thread's timeslice has expired and there is another
        // thread ready at the same priority
        if (t->iHeldFastMutex)
            {
            // round-robin deferred due to fast mutex held
            t->iHeldFastMutex->iWaiting = 1;
            return t;
            }

        t->iTime = t->iTimeslice;                   // reset old thread time slice
        t = static_cast<NThreadBase*>(t->iNext);    // next thread
        aS.iQueue[t->iPriority] = t;                // make it first in list
        __KTRACE_OPT(KSCHED2, DEBUGPRINT("RoundRobin->%T", t));
        }

    if (t->iHeldFastMutex)
        {
        if (t->iHeldFastMutex == &aS.iLock)
            {
            // thread holds system lock: use it
            return t;
            }

        if ((t->i_ThrdAttr & KThreadAttImplicitSystemLock) != 0 && aS.iLock.iHoldingThread)
            t->iHeldFastMutex->iWaiting = 1;

        __NK_ASSERT_DEBUG((t->i_ThrdAttr & KThreadAttAddressSpace) == 0);
        /*
        Check for an address space change. Not implemented for Win32, but useful as
        documentation of the algorithm.

        if ((t->i_ThrdAttr & KThreadAttAddressSpace) != 0 && t->iAddressSpace != aS.iAddressSpace)
            t->iHeldFastMutex->iWaiting = 1;
        */
        }
    else if (t->iWaitFastMutex && t->iWaitFastMutex->iHoldingThread)
        {
        __KTRACE_OPT(KSCHED2, DEBUGPRINT("Resched inter->%T, Blocked on %M", t->iWaitFastMutex->iHoldingThread, t->iWaitFastMutex));
        t = t->iWaitFastMutex->iHoldingThread;
        }
    else if (t->i_ThrdAttr & KThreadAttImplicitSystemLock)
        {
        // implicit system lock required
        if (aS.iLock.iHoldingThread)
            {
            // system lock held, switch to that thread
            t = aS.iLock.iHoldingThread;
            __KTRACE_OPT(KSCHED2, DEBUGPRINT("Resched inter->%T (IMP SYS)", t));
            t->iHeldFastMutex->iWaiting = 1;    // aS.iLock.iWaiting = 1;
            return t;
            }

        __NK_ASSERT_DEBUG((t->i_ThrdAttr & KThreadAttAddressSpace) == 0);
        /*
        Check for an address space change. Not implemented for Win32, but useful as
        documentation of the algorithm.

        if ((t->i_ThrdAttr & KThreadAttAddressSpace) != 0 || t->iAddressSpace != aS.iAddressSpace)
            {
            // what do we do now?
            __NK_ASSERT_DEBUG(FALSE);
            }
        */
        }

    return t;
    }

// from NThread
#undef i_ThrdAttr

// From here on it's all emulator (i.e. Win32) specific; there isn't any EPOC32 equivalent for most of it.
//
// The emulator uses one Win32 thread for each Symbian thread; these are the ones scheduled by the Symbian
// nanokernel in the algorithm above. Only one such thread will be running at a time; the others will be
// waiting on their individual scheduler locks, thus simulating a single-threaded architecture.
//
// In addition, there are some more Win32 threads used to handle timers, interrupts and the like. These
// are not under control of the Symbian scheduler. They are given higher priority than the Symbian threads,
// so they can run preemptively under control of the Win32 scheduler. However, they must call functions
// from the Win32Interrupt class before using any Symbian OS calls, so that the current Symbian thread can
// be suspended during the 'virtual interrupt'.

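// A minimal sketch (illustrative only, not part of the kernel logic) of the
// handshake that keeps exactly one Symbian thread runnable at a time: a context
// switch is, in essence, "signal the next thread's event, then block on my own":
//
//     SetEvent(next.iScheduleLock);                           // wake the target
//     WaitForSingleObject(current.iScheduleLock, INFINITE);   // sleep until rescheduled
//
// SwitchThreads() below implements this, folding the two calls into one atomic
// SignalObjectAndWait() where the host OS supports it.
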
static DWORD TlsIndex = TLS_OUT_OF_INDEXES;

void SchedulerInit(NThread& aInit)
//
// Initialise the win32 nKern scheduler
//
    {
    DWORD procaffin, sysaffin;
    if (GetProcessAffinityMask(GetCurrentProcess(), &procaffin, &sysaffin))
        {
        DWORD cpu;
        switch (Win32SingleCpu)
            {
        default:
            // bind the emulator to a nominated CPU on the host PC
            cpu = (1 << Win32SingleCpu);
            if (!(sysaffin & cpu))
                cpu = procaffin;    // CPU selection invalid
            break;

        case NThread::ECpuSingle:
            // bind the emulator to a single CPU on the host PC, pick one
            cpu = procaffin ^ (procaffin & (procaffin - 1));
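            // (x & (x - 1) clears the lowest set bit of x, so the XOR above
            // leaves exactly that lowest set bit: the lowest-numbered CPU in
            // the process affinity mask.)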
            break;

        case NThread::ECpuAll:
            // run the emulator on all CPUs on the host PC
            cpu = sysaffin;
            break;
            }

        SetProcessAffinityMask(GetCurrentProcess(), cpu);
        }

    // identify whether we can use the atomic SignalObjectAndWait API in Win32 for rescheduling
    Win32AtomicSOAW = (SignalObjectAndWait(aInit.iScheduleLock, aInit.iScheduleLock, INFINITE, FALSE) == WAIT_OBJECT_0);
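    // (Added note, assuming iScheduleLock is an auto-reset event as its use in
    // SwitchThreads() suggests: the probe signals and waits on the same event,
    // so a successful call consumes the signal it just set and leaves the event
    // unchanged. If the host lacks a working SignalObjectAndWait, the call
    // fails and SwitchThreads() falls back to separate signal/wait calls.)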

    // allocate the TLS used for thread identification, and set it for the init thread
    TlsIndex = TlsAlloc();
    __NK_ASSERT_ALWAYS(TlsIndex != TLS_OUT_OF_INDEXES);
    SchedulerRegister(aInit);

    Win32FindNonPreemptibleFunctions();
    Interrupt.Init();
    }

void SchedulerRegister(NThread& aSelf)
    {
    TlsSetValue(TlsIndex, &aSelf);
    }

inline NThread* RunningThread()
// Returns the NThread actually running
    {
    if (TlsIndex == TLS_OUT_OF_INDEXES)
        return NULL;    // not yet initialised
    else
        return static_cast<NThread*>(TlsGetValue(TlsIndex));
    }

inline TBool IsScheduledThread()
// True if the NThread actually running is the scheduled one (not an interrupt thread or similar)
    {
    return RunningThread() == TheScheduler.iCurrentThread;
    }

inline NThread& CheckedCurrentThread()
// Returns the NThread actually running, checking that it's the scheduled one (not an interrupt thread or similar)
    {
    NThread* t = RunningThread();
    __NK_ASSERT_ALWAYS(t == TheScheduler.iCurrentThread);
    return *t;
    }

static void ThreadExit(NThread& aCurrent, NThread& aNext)
//
// The final context switch of a thread.
// Wake up the next thread and then destroy this one's Win32 resources.
//
// Return without terminating if we need to immediately reschedule again
// because we had to unlock the kernel but there are DFCs pending.
//
    {
    // the thread is dead
    // extract win32 handles from the dying NThread object before rescheduling
    HANDLE sl = aCurrent.iScheduleLock;
    HANDLE th = aCurrent.iWinThread;

    // wake up the next thread
    if (aNext.WakeUp())
        return;         // need to re-reschedule in this thread

    // we are now a vanilla win32 thread, nKern no longer knows about us
    // release resources and exit cleanly
    CloseHandle(sl);
    CloseHandle(th);
    ExitThread(0);      // does not return
    }

#ifdef MONITOR_THREAD_CPU_TIME
static inline void UpdateThreadCpuTime(NThread& aCurrent, NThread& aNext)
    {
    TUint32 timestamp = NKern::FastCounter();
    if (aCurrent.iLastStartTime)
        aCurrent.iTotalCpuTime += timestamp - aCurrent.iLastStartTime;
    aNext.iLastStartTime = timestamp;
    }
#else
static inline void UpdateThreadCpuTime(NThread& /*aCurrent*/, NThread& /*aNext*/)
    {
    }
#endif  // MONITOR_THREAD_CPU_TIME

static void SwitchThreads(NThread& aCurrent, NThread& aNext)
//
// The fundamental context switch - wake up the next thread and wait to be rescheduled.
// Trivially this is aNext.WakeUp(), Wait(aCurrent.iScheduleLock), but we may be able
// to optimise the signal-and-wait into one atomic operation.
//
    {
    __NK_ASSERT_ALWAYS(InterruptsStatus(ETrue));
    UpdateThreadCpuTime(aCurrent, aNext);

    if (aCurrent.iNState == NThread::EDead)
        {
        ThreadExit(aCurrent, aNext);
        // Yes, this is reachable!
        }
    else if (Win32AtomicSOAW && aNext.iWakeup == NThread::ERelease)
        {
        // special case optimisation for normally scheduled threads using the atomic Win32 primitive
        TheScheduler.iCurrentThread = &aNext;
        CheckedSignalObjectAndWait(aNext.iScheduleLock, aCurrent.iScheduleLock);
        }
    else if (aNext.WakeUp())
        {
        // We didn't wake the target thread; instead we need to re-reschedule in this thread
        __NK_ASSERT_ALWAYS(InterruptsStatus(EFalse));
        return;
        }
    else
        {
        // Target thread woken, now wait to be rescheduled
        CheckedWaitForSingleObject(aCurrent.iScheduleLock);
        }

    __NK_ASSERT_ALWAYS(InterruptsStatus(ETrue));
    }

void TScheduler::Reschedule()
//
// Enter with kernel locked, exit with kernel unlocked, interrupts disabled.
// If the thread is dead do not return, but terminate the thread.
//
    {
    __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 1);
    NThread& me = *static_cast<NThread*>(TheScheduler.iCurrentThread);

    for (;;)
        {
        NKern::DisableAllInterrupts();
        if (TheScheduler.iDfcPendingFlag)
            TheScheduler.QueueDfcs();

        // Exit from this loop when further rescheduling is no longer needed
        if (!TheScheduler.iRescheduleNeededFlag)
            break;

        // Choose the next thread to run, using the Symbian scheduler
        TheScheduler.iRescheduleNeededFlag = FALSE;
        NKern::EnableAllInterrupts();
        NThread* t = static_cast<NThread*>(SelectThread(TheScheduler));
        __KTRACE_OPT(KSCHED, DEBUGPRINT("Reschedule->%T (%08x%08x)", t, TheScheduler.iPresent[1], TheScheduler.iPresent[0]));

#ifdef __EMI_SUPPORT__
        EMI_AddTaskSwitchEvent(&me, t);
        EMI_CheckDfcTag(t);
#endif
#ifdef BTRACE_CPU_USAGE
        if (TheScheduler.iCpuUsageFilter)
            TheScheduler.iBTraceHandler(BTRACE_HEADER_C(4, BTrace::ECpuUsage, BTrace::ENewThreadContext), 0, (TUint32)t, 0, 0, 0, 0, 0);
#endif

        // SwitchThreads() can return immediately, if it turns out that another reschedule is
        // necessary; otherwise, this thread will be descheduled in favour of the one selected
        // above, and SwitchThreads() will only return when this thread is next selected
        SwitchThreads(me, *t);

        // When we start again, we should check for being forced to exit; otherwise go round the
        // loop again to see whether another reschedule is called for (e.g. if there are new DFCs).
        NThread::TDivert divertToExit = me.iDivertFn;
        me.iDivertFn = NULL;
        if (divertToExit)
            divertToExit();
        }

    // interrupts are disabled, the kernel is still locked
    if (TheScheduler.iProcessHandler)
        (*ProcessHandler(TheScheduler.iProcessHandler))(me.iAddressSpace);  // thread will need to have its static data updated

    __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 1);
    TheScheduler.iKernCSLocked = 0;
    }

void TScheduler::YieldTo(NThreadBase*)
//
// Directed context switch to the nominated thread.
// Enter with kernel locked, exit with kernel unlocked but interrupts disabled.
//
    {
    RescheduleNeeded();
    TScheduler::Reschedule();
    }

TBool NThread::WakeUp()
//
// Wake up the thread. What to do depends on whether it was preempted or voluntarily
// rescheduled.
//
// On entry, the kernel is locked, and interrupts may be enabled or disabled.
//
// The return value is TRUE if the caller should immediately reschedule again because we
// needed to unlock the kernel in order to resume the thread but there were DFCs pending.
// In this case, the thread is not woken, the kernel remains locked, and the return is
// made with interrupts disabled (whether or not they were on entry).
//
// Otherwise, the target thread is woken up (in any of several different ways), and
// the return value is FALSE. In that case the interrupt status is unchanged; and the
// kernel may or may not still be locked.
//
    {
    __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked > 0);
    __NK_ASSERT_ALWAYS(RunningThread() != this);    // Can't wake self!

    switch (iWakeup)
        {
    default:
        FAULT();

    case EIdle:
        // The thread is waiting on its scheduler lock, in Idle()
        __NK_ASSERT_ALWAYS(TheScheduler.iCurrentThread == this);
        CheckedSetEvent(iScheduleLock);
        break;

    case ERelease:
        // The thread is waiting on its scheduler lock
        TheScheduler.iCurrentThread = this;
        CheckedSetEvent(iScheduleLock);
        break;

    case EResumeLocked:
        // The thread is Win32 suspended and must be resumed.
        //
        // A newly created thread does not need the kernel unlocked so we can
        // just resume it; OTOH it will need to have its static data updated ...
        //
        __KTRACE_OPT(KSCHED, DEBUGPRINT("Win32ResumeLocked->%T", this));
        iWakeup = ERelease;
        TheScheduler.iCurrentThread = this;
        if (TheScheduler.iProcessHandler)
            (*ProcessHandler(TheScheduler.iProcessHandler))(iAddressSpace);
        CheckedResumeThread(iWinThread);
        break;

    case EResumeDiverted:
        // The thread is Win32 suspended and must be resumed.
        //
        // It does not need the kernel unlocked, but does have a diversion pending. We
        // know it's safe to divert the thread here because we called IsSafeToPreempt()
        // when we suspended it - otherwise the diversion could get lost.
        //
        __KTRACE_OPT(KSCHED, DEBUGPRINT("Win32Resume->%T (Resuming diverted thread)", this));
        iWakeup = ERelease;
        TheScheduler.iCurrentThread = this;
        ApplyDiversion();
        CheckedResumeThread(iWinThread, ETrue);
        break;

    case EResume:
        // The thread is Win32 suspended and must be resumed.
        //
        // The complication here is that we have to unlock the kernel on behalf of the
        // pre-empted thread. Before doing so, we have to check whether there are DFCs
        // or a reschedule pending; if so, we don't unlock the kernel or wake the target
        // thread, but instead return TRUE, so that our caller (usually SwitchThreads()
        // above) knows to return and go round the TScheduler::Reschedule() loop again.
        //
        TInt irq = NKern::DisableAllInterrupts();
        if (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag)
            {
            __KTRACE_OPT(KSCHED, DEBUGPRINT("Win32Resume->%T preempted", this));
            TheScheduler.iRescheduleNeededFlag = TRUE;  // ensure we do the reschedule
            return TRUE;
            }

        // Otherwise we mark the thread as not-preempted, unlock the kernel, restore
        // interrupts, and resume the thread.
        __KTRACE_OPT(KSCHED, DEBUGPRINT("Win32Resume->%T", this));
        iWakeup = ERelease;
        TheScheduler.iCurrentThread = this;
        if (TheScheduler.iProcessHandler)
            (*ProcessHandler(TheScheduler.iProcessHandler))(iAddressSpace); // threads resumed after interrupt or locks need to have static data updated
        TheScheduler.iKernCSLocked = 0;

        // If there are callbacks waiting, and the thread is in user mode, divert it to
        // pick up its callbacks (we know this is safe because we called IsSafeToPreempt()
        // when we suspended it - otherwise the diversion could get lost).
        if (iUserModeCallbacks != NULL && !iInKernel)
            {
            TheScheduler.iKernCSLocked = 1;     // prevent further pre-emption
            ApplyDiversion();
            }

        // If pre-emption occurs before the thread is resumed, it is the new thread that
        // is pre-empted, not the running thread, so we are guaranteed to be able to call
        // ResumeThread. If pre-emption occurs, and we are rescheduled to run before that
        // occurs, we will once again be running with the kernel locked and the other
        // thread will have been re-suspended by Win32: so all is well.
        //
        NKern::RestoreInterrupts(irq);
        CheckedResumeThread(iWinThread);
        break;
        }

    return FALSE;
    }

/** Put the emulator into 'idle'.
    This is called by the idle thread when there is nothing else to do.

    @internalTechnology
 */
EXPORT_C void NThread::Idle()
//
// Rather than spin, we go to sleep on the schedule lock. Preemption detects
// this state (Win32Idling) and pokes the event rather than diverting the thread.
//
// enter and exit with kernel locked
//
    {
    NThread& me = *static_cast<NThread*>(TheScheduler.iCurrentThread);
    me.iWakeup = EIdle;
    CheckedWaitForSingleObject(me.iScheduleLock);

    // something happened, and we've been prodded by an interrupt
    // the kernel was locked by the interrupt, and now reschedule
    me.iWakeup = ERelease;
    TScheduler::Reschedule();
    NKern::EnableAllInterrupts();
    }

void EnterKernel(TBool aDiversion)
    {
    NThread& t = CheckedCurrentThread();
    volatile TInt& inKernel = t.iInKernel;
    __NK_ASSERT_DEBUG(inKernel >= 0);

    // This code has to be re-entrant, because a thread that's in the process
    // of entering the kernel may be preempted; then if it isn't yet marked
    // as 'in the kernel' it can be diverted through EnterKernel()/LeaveKernel()
    // in order to execute user-mode callbacks. However this is all in the
    // same thread context, so it doesn't need any special synchronisation.
    // The moment of 'entering' the kernel is deemed to occur when the new value
    // of iInKernel is written back to the NThread object.
    if (inKernel++ == 0)
        {
        // preamble when coming from userspace
        __NK_ASSERT_ALWAYS(InterruptsStatus(ETrue));
        __NK_ASSERT_ALWAYS(t.iHeldFastMutex == 0);
        if (aDiversion)
            {
            // Forced entry, to make the thread exit or run user-mode callbacks.
            // If exiting, iCsCount will have been set to 1 to prevent preemption;
            // otherwise it must be 0, as in the non-diversion case.
            __NK_ASSERT_ALWAYS(t.iCsCount <= 1);
            __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 1);
            }
        else
            {
            __NK_ASSERT_ALWAYS(t.iCsCount == 0);
            __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 0);
            }
        }
    }

void LeaveKernel()
    {
    NThread& t = CheckedCurrentThread();
    volatile TInt& inKernel = t.iInKernel;
    __NK_ASSERT_DEBUG(inKernel > 0);

    // This code has to be re-entrant, because a thread that's in the process
    // of leaving the kernel may be preempted; then if it isn't still marked
    // as 'in the kernel' it can be diverted through EnterKernel()/LeaveKernel()
    // in order to execute user-mode callbacks. However this is all in the
    // same thread context, so it doesn't need any special synchronisation.
    // The moment of 'leaving' the kernel is deemed to occur when the new value
    // of iInKernel is written back to the NThread object.
    if (inKernel == 1)
        {
        // postamble when about to return to userspace
        __NK_ASSERT_ALWAYS(t.iCsCount == 0);
        __NK_ASSERT_ALWAYS(t.iHeldFastMutex == 0);
        __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 0);
        NKern::DisableAllInterrupts();
        t.CallUserModeCallbacks();
        NKern::EnableAllInterrupts();
        }

    inKernel -= 1;
    }

/** Locks the kernel and returns a pointer to the current thread.

    Increments iKernCSLocked, thereby deferring IDFCs and preemption.

    @pre Call either in a thread or an IDFC context.
    @pre Do not call from an ISR.
    @pre Do not call from bare Win32 threads.
 */
EXPORT_C NThread* NKern::LockC()
    {
    CHECK_PRECONDITIONS(MASK_NOT_ISR, "NKern::LockC");
    __ASSERT_WITH_MESSAGE_ALWAYS(IsScheduledThread(), "Do not call from bare Win32 threads", "NKern::LockC");   // check that we are a scheduled thread
    ++TheScheduler.iKernCSLocked;
    return (NThread*)TheScheduler.iCurrentThread;
    }

/** Locks the kernel.

    Increments iKernCSLocked, thereby deferring IDFCs and preemption.

    @pre Call either in a thread or an IDFC context.
    @pre Do not call from an ISR.
    @pre Do not call from bare Win32 threads.
 */
EXPORT_C void NKern::Lock()
    {
    CHECK_PRECONDITIONS(MASK_NOT_ISR, "NKern::Lock");
    __ASSERT_WITH_MESSAGE_ALWAYS(IsScheduledThread(), "Do not call from bare Win32 threads", "NKern::Lock");    // check that we are a scheduled thread
    ++TheScheduler.iKernCSLocked;
    }

/** Unlocks the kernel.

    Decrements iKernCSLocked; if it would become zero and IDFCs or a reschedule are
    pending, calls the scheduler to process them.

    @pre Call either in a thread or an IDFC context.
    @pre Do not call from an ISR.
    @pre Do not call from bare Win32 threads.
 */
EXPORT_C void NKern::Unlock()
    {
    // check that the caller is the scheduled thread
    __ASSERT_WITH_MESSAGE_DEBUG(IsScheduledThread(), "Do not call from bare Win32 threads", "NKern::Unlock");
    CHECK_PRECONDITIONS(MASK_NOT_ISR, "NKern::Unlock");
    __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked > 0);     // Can't unlock if it isn't locked!

    // Rather than decrementing the lock before testing the flags, and then
    // re-incrementing it in order to call Reschedule() -- which would
    // leave a window for preemption -- we can test the flags first, and then
    // see whether the lock count is 1 ...
    if ((TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag) &&
            TheScheduler.iKernCSLocked == 1)
        {
        // Reschedule() returns with the kernel unlocked, but interrupts disabled
        TScheduler::Reschedule();
        NKern::EnableAllInterrupts();
        }
    else
        {
        // All other cases - just decrement the lock count
        TheScheduler.iKernCSLocked -= 1;
        }
    }

/** Allows IDFCs and rescheduling if they are pending.

    If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1,
    calls the scheduler to process the IDFCs and possibly reschedule.

    @return Nonzero if a reschedule actually occurred, zero if not.

    @pre Call either in a thread or an IDFC context.
    @pre Do not call from an ISR.
    @pre Do not call from bare Win32 threads.
 */
EXPORT_C TInt NKern::PreemptionPoint()
    {
    // check that the caller is the scheduled thread
    __ASSERT_WITH_MESSAGE_DEBUG(IsScheduledThread(), "Do not call from bare Win32 threads", "NKern::PreemptionPoint");
    CHECK_PRECONDITIONS(MASK_NOT_ISR, "NKern::PreemptionPoint");

    if ((TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag) &&
            TheScheduler.iKernCSLocked == 1)
        {
        // Reschedule() returns with the kernel unlocked, but interrupts disabled
        TScheduler::Reschedule();
        TheScheduler.iKernCSLocked = 1;
        NKern::EnableAllInterrupts();
        return TRUE;
        }

    return FALSE;
    }

/** Return the current processor context type
    (thread, IDFC, interrupt or escaped thread).

    @return A value from the NKern::TContext enumeration (including EEscaped).
    @pre Any context.

    @see NKern::TContext
 */
EXPORT_C TInt NKern::CurrentContext()
    {
    NThread* t = RunningThread();

    if (!t)
        return NKern::EInterrupt;

    if (TheScheduler.iInIDFC)
        return NKern::EIDFC;

    if (t->iWakeup == NThread::EEscaped)
        return NKern::EEscaped;

    __NK_ASSERT_ALWAYS(NKern::Crashed() || t == TheScheduler.iCurrentThread);
    return NKern::EThread;
    }

/** Disable normal 'interrupts'.

    @param aLevel Ignored
    @pre Call in a Symbian (thread, IDFC, ISR) context.
    @pre Do not call from bare Win32 threads.
    @return Cookie to be passed into RestoreInterrupts()
 */
EXPORT_C TInt NKern::DisableInterrupts(TInt /*aLevel*/)
    {
    return Interrupt.MaskInterrupts(EFalse);
    }

/** Restore the interrupt mask to the state preceding a DisableInterrupts() call.

    @param aLevel Cookie returned by Disable(All)Interrupts()
    @pre Call in a Symbian (thread, IDFC, ISR) context.
    @pre Do not call from bare Win32 threads.
 */
EXPORT_C void NKern::RestoreInterrupts(TInt aLevel)
    {
    Interrupt.RestoreInterruptMask(aLevel);
    }

/** Disable all maskable 'interrupts'.

    @pre Call in a Symbian (thread, IDFC, ISR) context.
    @pre Do not call from bare Win32 threads.
    @return Cookie to be passed into RestoreInterrupts()
 */
EXPORT_C TInt NKern::DisableAllInterrupts()
    {
    return Interrupt.MaskInterrupts(EFalse);
    }

/** Enable all maskable 'interrupts'.

    @internalComponent
    @pre Call in a Symbian (thread, IDFC, ISR) context.
    @pre Do not call from bare Win32 threads.
 */
EXPORT_C void NKern::EnableAllInterrupts()
    {
    Interrupt.RestoreInterruptMask(0);
    }

/** Mark the start of an 'interrupt' in the Win32 emulator.
    This must be called in interrupt threads before using any other kernel APIs,
    and should be paired with a call to EndOfInterrupt().

    @pre Win32 'interrupt' thread context
 */
EXPORT_C void StartOfInterrupt()
    {
    // check that the caller is not a scheduled thread
    __ASSERT_WITH_MESSAGE_DEBUG(!IsScheduledThread(), "Win32 'interrupt' thread context", "StartOfInterrupt");
    Interrupt.BeginInterrupt();
    }

/** Mark the end of an 'interrupt' in the Win32 emulator.
    This checks to see if we need to reschedule.

    @pre Win32 'interrupt' thread context
 */
EXPORT_C void EndOfInterrupt()
    {
    // check that the caller is not a scheduled thread
    __ASSERT_WITH_MESSAGE_DEBUG(!IsScheduledThread(), "Win32 'interrupt' thread context", "EndOfInterrupt");
    Interrupt.EndInterrupt();
    }
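
// Typical shape of a Win32 'interrupt' thread body (illustrative sketch only;
// the timer and event threads elsewhere in the emulator are the real callers):
//
//     StartOfInterrupt();
//     // ... queue a DFC, mark a tick, or otherwise poke the nanokernel ...
//     EndOfInterrupt();    // may trigger a reschedule of the Symbian threads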

// The Win32Interrupt class manages virtual interrupts from Win32 event threads

void Win32Interrupt::Init()
    {
    InitializeCriticalSection(&iCS);
    iQ = CreateSemaphoreA(NULL, 0, KMaxTInt, NULL);
    __NK_ASSERT_ALWAYS(iQ);

    // create the NThread which exists solely to service reschedules for interrupts;
    // this makes EndInterrupt() much simpler as it merely needs to kick this thread
    SNThreadCreateInfo ni;
    memclr(&ni, sizeof(ni));
    ni.iFunction = &SchedulerThreadFunction;
    ni.iTimeslice = -1;
    ni.iPriority = 1;
    NKern::ThreadCreate(&iScheduler, ni);
    NKern::Lock();
    TScheduler::YieldTo(&iScheduler);
    RestoreInterruptMask(0);
    }

void Win32Interrupt::BeginInterrupt()
    {
    __NK_ASSERT_ALWAYS(!IsScheduledThread());   // check that we aren't a scheduled thread
    MaskInterrupts(ETrue);                      // suspend the scheduled thread and set the mask
#ifdef BTRACE_CPU_USAGE
    BTrace0(BTrace::ECpuUsage, BTrace::EIrqStart);
#endif
    }

void Win32Interrupt::EndInterrupt()
    {
    NThread* pC = iInterrupted;
    iInterrupted = 0;
    __NK_ASSERT_ALWAYS(pC == TheScheduler.iCurrentThread);  // unchanged since BeginInterrupt()
    __NK_ASSERT_ALWAYS(!IsScheduledThread());               // check that we aren't a scheduled thread
    __NK_ASSERT_ALWAYS(iOwner == GetCurrentThreadId());     // check we are the interrupting thread
    __NK_ASSERT_ALWAYS(InterruptsStatus(EFalse));
    __NK_ASSERT_ALWAYS(iLevel == 1);                        // DSG: is this correct?

    if (TheScheduler.iKernCSLocked)
        {
        // No rescheduling allowed; just resume the interrupted thread
        NKern::EnableAllInterrupts();
        CheckedResumeThread(pC->iWinThread);
        return;
        }

    __NK_ASSERT_ALWAYS(iLevel == 1);                        // DSG: is this correct?
    __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 0);

    TBool diversionUnsafe = EFalse;     // Optimistic assumption until checked
    if (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag)
        {
        switch (pC->iWakeup)
            {
        default:
            FAULT();

        case NThread::EIdle:
            // wake up the Idle thread, it will always reschedule immediately
            TheScheduler.iKernCSLocked = 1;     // prevent further pre-emption
            if (pC->WakeUp())
                FAULT();                        // this can't happen
            NKern::EnableAllInterrupts();
            CheckedResumeThread(pC->iWinThread);
            return;

        case NThread::ERelease:
            if (pC->IsSafeToPreempt())
                {
                // pre-empt the current thread and poke the 'scheduler' thread
                UpdateThreadCpuTime(*pC, iScheduler);
                pC->iWakeup = NThread::EResume; // how to wake this thread later
                TheScheduler.iKernCSLocked = 1; // prevent further pre-emption
                RescheduleNeeded();
                NKern::EnableAllInterrupts();
                if (iScheduler.WakeUp())
                    FAULT();                    // this can't happen
                return;
                }

            diversionUnsafe = ETrue;            // don't consider diverting
            break;
            }
        }

#ifdef BTRACE_CPU_USAGE
    // no thread reschedule, so emit the trace here ...
    BTrace0(BTrace::ECpuUsage, BTrace::EIrqEnd);
#endif

    // If there are callbacks waiting, and the thread is in user mode, and it's at a
    // point where it can safely be preempted, then divert it to pick up its callbacks
    if (pC->iUserModeCallbacks != NULL && !pC->iInKernel && !diversionUnsafe)
        if (pC->IsSafeToPreempt())
            {
            TheScheduler.iKernCSLocked = 1;
            pC->ApplyDiversion();
            }

    NKern::EnableAllInterrupts();
    CheckedResumeThread(pC->iWinThread);
    }
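
// Summary of the protocol implemented below (restating the code, for clarity):
// iOwner is the Win32 thread id currently holding the virtual interrupt mask,
// iLevel its recursion depth, and iWaiting the number of threads queued on the
// Win32 semaphore iQ waiting to take ownership. MaskInterrupts(ETrue) is the
// 'interrupt' path: it additionally suspends the current Symbian thread so that
// scheduler state can be examined and updated safely.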
TInt Win32Interrupt::MaskInterrupts(TBool aPreempt)
    {
    if (!iQ)
        return 0;                   // interrupt scheme not enabled yet

    EnterCriticalSection(&iCS);     // Win32 critical section, not a Symbian one

    DWORD id = GetCurrentThreadId();
    if (iOwner == id)
        {
        // The easiest case: we already own the mask, so just increment the level.
        // The requirement for rescheduling on exit is unaffected.
        __NK_ASSERT_ALWAYS(!aPreempt);
        TInt r = iLevel++;
        LeaveCriticalSection(&iCS);
        return r;
        }

    if (!iOwner && !aPreempt)
        {
        // Another easy case; we've been called from a Symbian thread, and there's
        // no contention, so we can just take ownership of the interrupt mask. No
        // rescheduling is required on exit (but this may change) ...
        __NK_ASSERT_ALWAYS(iLevel == 0);
        TInt r = iLevel++;
        iOwner = id;
        iRescheduleOnExit = EFalse;
        LeaveCriticalSection(&iCS);
        return r;
        }

    if (iOwner)
        {
        // Someone else owns it; if we've been called from an interrupt thread,
        // this could be another interrupt thread or a Symbian thread. If we're
        // being called from a Symbian thread, the owner must be another Symbian
        // thread, because a Symbian thread can't preempt an interrupt thread.
        //
        // In either case, we can increment the count of waiters, then wait for the
        // current holder to release it. Note that another (interrupt) thread could
        // also do this, and then the order in which they get to run is undefined.
        iWaiting += 1;

        do
            {
            __NK_ASSERT_ALWAYS(iWaiting > 0);
            LeaveCriticalSection(&iCS);
            CheckedWaitForSingleObject(iQ);
            EnterCriticalSection(&iCS);
            __NK_ASSERT_ALWAYS(iWaiting > 0);
            }
        while (iOwner);

        iWaiting -= 1;
        iRescheduleOnExit = IsScheduledThread() && (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag);
        }

    // Nobody now controls the interrupt mask ...
    __NK_ASSERT_ALWAYS(iOwner == 0 && iLevel == 0);

    if (aPreempt)
        {
        // ... but in this case, we've been called from an interrupt thread and
        // a Symbian thread may still be running -- yes, even though all emulator
        // threads are normally bound to a single CPU!
        //
        // To ensure that such a thread doesn't see an inconsistent state, we
        // have to suspend it before we actually take ownership, as it could
        // examine the interrupt state at any time, without taking any locks.
        __NK_ASSERT_ALWAYS(iInterrupted == 0);  // we haven't done this already
        NThread* pC;
        for (;;)
            {
            pC = static_cast<NThread*>(TheScheduler.iCurrentThread);
            CheckedSuspendThread(pC->iWinThread);
            if (pC == TheScheduler.iCurrentThread)
                break;              // no change of thread, so ok to proceed

            // We suspended the thread while doing a (Symbian) context switch!
            // The scheduler state might be inconsistent if we left it like that,
            // so instead we'll resume it, then try again ...
            CheckedResumeThread(pC->iWinThread);
            }

        __NK_ASSERT_ALWAYS(iInterrupted == 0);
        iInterrupted = pC;
        }

    // Now we can assert ownership of the interrupt mask.
    __NK_ASSERT_ALWAYS(iOwner == 0 && iLevel == 0);
    TInt r = iLevel++;
    iOwner = id;
    LeaveCriticalSection(&iCS);
    return r;
    }

void Win32Interrupt::RestoreInterruptMask(TInt aLevel)
    {
    if (!iQ)
        return;                     // interrupt scheme not enabled yet

    DWORD id = GetCurrentThreadId();
    EnterCriticalSection(&iCS);     // Win32 critical section, not a Symbian one

    for (;;)
        {
        __NK_ASSERT_ALWAYS(id == iOwner);   // only the current owner may do this
        TInt count = iLevel - aLevel;
        if (count <= 0)
            break;                  // already restored to that level

        iLevel = aLevel;            // update the recursion level first
        if (aLevel > 0)
            {
            // The easiest case: we're still holding ownership, so there's nothing to do
            break;
            }

        iOwner = 0;                 // give up ownership
        if (iWaiting)
            {
            // Someone else is waiting for control of the interrupt mask.
            // They may preempt us as soon as we exit the critical section
            // (at the end of this function)
            CheckedReleaseSemaphore(iQ);
            break;
            }

        // Lock fully released, no-one waiting, so see whether we need to reschedule
        if (TheScheduler.iKernCSLocked || !iRescheduleOnExit)
            break;

        // Interrupt mask fully unlocked, but reschedule required ...
        TheScheduler.iKernCSLocked = 1;
        LeaveCriticalSection(&iCS);
        TScheduler::Reschedule();
        EnterCriticalSection(&iCS);

        // Note: TScheduler::Reschedule() above calls MaskInterrupts() -- which changes
        // the state of most of our member data. It returns with the kernel unlocked,
        // but interrupts still disabled. Hence we will have reacquired ownership of the
        // interrupt mask, and must release it again. Instead of going recursive with a
        // call to EnableAllInterrupts() we iterate; we'll get out of this loop eventually,
        // because iRescheduleOnExit is updated by MaskInterrupts() ...
        aLevel = 0;
        }

    LeaveCriticalSection(&iCS);
    }
||
273 | 1003 |
void Win32Interrupt::ForceReschedule() |
1004 |
{ |
|
1005 |
RescheduleNeeded(); |
|
1006 |
if (iScheduler.WakeUp()) |
|
1007 |
FAULT(); // this can't happen |
|
1008 |
} |
|
1009 |
||
1010 |
void Win32Interrupt::SchedulerThreadFunction(TAny*) |
|
0 | 1011 |
// |
1012 |
// The entry-point for the interrupt-rescheduler thread. |
|
1013 |
// |
|
1014 |
// This spends its whole life going around the TScheduler::Reschedule() loop |
|
1015 |
// selecting another thread to run. |
|
1016 |
// |
|
1017 |
{ |
|
1018 |
TheScheduler.iKernCSLocked = 1; |
|
1019 |
RescheduleNeeded(); |
|
1020 |
TScheduler::Reschedule(); |
|
1021 |
FAULT(); |
|
1022 |
} |
|
1023 |
||
273 | 1024 |
|
1025 |
// |
|
1026 |
// We need a global lock in the emulator to avoid scheduling reentrancy problems with the host |
|
1027 |
// in particular, some host API calls acquire host mutexes, preempting such services results |
|
1028 |
// in suspension of those threads which can cause deadlock if another thread requires that host |
|
1029 |
// mutex. |
|
1030 |
// |
|
1031 |
// Because thread dreaction and code loading also require the same underlying mutex (used |
|
1032 |
// by NT to protect DLL entrypoint calling), this would be rather complex with a fast mutex. |
|
1033 |
// For now, keep it simple and use the preemption lock. Note that this means that the |
|
1034 |
// MS timer DFC may be significantly delayed when loading large DLL trees, for example. |
|
1035 |
// |
|
1036 |
||
1037 |
void SchedulerLock() |
|
1038 |
// |
|
1039 |
// Acquire the global lock. May be called before scheduler running, so handle that case |
|
1040 |
// |
|
0 | 1041 |
{ |
273 | 1042 |
if (TheScheduler.iCurrentThread) |
1043 |
{ |
|
1044 |
EnterKernel(); |
|
1045 |
NKern::Lock(); |
|
1046 |
} |
|
0 | 1047 |
} |
1048 |
||
273 | 1049 |
void SchedulerUnlock() |
1050 |
// |
|
1051 |
// Release the global lock. May be called before scheduler running, so handle that case |
|
1052 |
// |
|
1053 |
{ |
|
1054 |
if (TheScheduler.iCurrentThread) |
|
1055 |
{ |
|
1056 |
NKern::Unlock(); |
|
1057 |
LeaveKernel(); |
|
1058 |
} |
|
1059 |
} |
|
1060 |
||
1061 |
||
1062 |
// This function allows a thread to escape from the Symbian scheduling domain to |
|
1063 |
// become an ordinary Win32 thread for a while, in cases where it is necessary |
|
1064 |
// to use Win32 APIs that are incompatible with the Symbian threading model. |
|
1065 |
// AFAICS this is not currently used! |
|
void SchedulerEscape()
    {
    NThread& me = CheckedCurrentThread();
    EnterKernel();
    __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 0);    // Can't call Escape() with the Emulator/kernel already locked
    NKern::ThreadEnterCS();
    NKern::Lock();
    me.iNState = NThreadBase::EBlocked;
    TheScheduler.Remove(&me);
    me.iWakeup = NThread::EEscaped;
    SetThreadPriority(me.iWinThread, THREAD_PRIORITY_ABOVE_NORMAL);
    Interrupt.ForceReschedule();
    // This schedules some other thread so we can carry on outside the scheduler domain.
    // It will change the value of iCurrentThread to ensure the 'escaped' invariants are set
    }

void ReenterDfc(TAny* aPtr)
    {
    NThread& me = *static_cast<NThread*>(aPtr);
    me.iWakeup = NThread::ERelease;
    me.CheckSuspendThenReady();
    }

void SchedulerReenter()
    {
    NThread* me = RunningThread();
    __NK_ASSERT_ALWAYS(me);
    __NK_ASSERT_ALWAYS(me->iWakeup == NThread::EEscaped);
    TDfc idfc(&ReenterDfc, me);
    StartOfInterrupt();
    idfc.Add();
    EndOfInterrupt();
    SetThreadPriority(me->iWinThread, THREAD_PRIORITY_NORMAL);
    CheckedWaitForSingleObject(me->iScheduleLock);
    // when released, the kernel is locked and handed over to us
    // need to complete the reschedule protocol in this thread now
    TScheduler::Reschedule();
    NKern::EnableAllInterrupts();
    NKern::ThreadLeaveCS();
    LeaveKernel();
    }

//
// We use SuspendThread and ResumeThread to preempt threads. This can cause
// deadlock if the thread is using Windows synchronisation primitives (e.g.
// critical sections). This isn't too much of a problem most of the time,
// because threads generally use the Symbian environment rather than the native
// Windows APIs. However exceptions are an issue - they can happen at any time,
// and cause execution of native Windows code over which we have no control.
//
// To work around this we examine the call stack to see if the thread is inside
// one of the Windows exception handling functions. If so, preemption is
// deferred.
//

#include <winnt.h>
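
// Added note on the stack walk below (assumes standard x86 EBP frame chains,
// which is what Win32IsThreadInNonPreemptibleFunction relies on): each frame is
//
//     [ebp+0]  saved EBP of the caller  -> frame[0]
//     [ebp+4]  return address (EIP)     -> frame[1]
//
// Frames built without EBP (frame-pointer-omitting code) break the chain, which
// is why each candidate frame pointer is validated before being dereferenced.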

// Uncomment the following line to turn on tracing when we examine the call stack
// #define DUMP_STACK_BACKTRACE

#ifdef DUMP_STACK_BACKTRACE

#include <psapi.h>

typedef BOOL (WINAPI GMIFunc)(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb);
typedef BOOL (WINAPI EPMFunc)(HANDLE hProcess, HMODULE* lphModule, DWORD cb, LPDWORD lpcbNeeded);
typedef DWORD (WINAPI GMBNFunc)(HANDLE hProcess, HMODULE hModule, LPSTR lpBaseName, DWORD nSize);

void PrintAllModuleInfo()
    {
    HMODULE psapiLibrary = LoadLibraryA("psapi.dll");
    __NK_ASSERT_ALWAYS(psapiLibrary != NULL);
    EPMFunc* epmFunc = (EPMFunc*)GetProcAddress(psapiLibrary, "EnumProcessModules");
    __NK_ASSERT_ALWAYS(epmFunc != NULL);
    GMIFunc* gmiFunc = (GMIFunc*)GetProcAddress(psapiLibrary, "GetModuleInformation");
    __NK_ASSERT_ALWAYS(gmiFunc != NULL);
    GMBNFunc* gmbnFunc = (GMBNFunc*)GetProcAddress(psapiLibrary, "GetModuleBaseNameA");
    __NK_ASSERT_ALWAYS(gmbnFunc != NULL);
    const TInt maxModules = 256;
    HMODULE modules[maxModules];
    DWORD spaceNeeded;
    BOOL r = epmFunc(GetCurrentProcess(), modules, sizeof(HMODULE) * maxModules, &spaceNeeded);
    __NK_ASSERT_ALWAYS(r);
    __NK_ASSERT_ALWAYS(spaceNeeded <= sizeof(HMODULE) * maxModules);

    for (TUint i = 0 ; i < spaceNeeded / sizeof(HMODULE) ; ++i)
        {
        HMODULE library = modules[i];
        const TUint maxNameLen = 64;
        char name[maxNameLen];
        WORD len = gmbnFunc(GetCurrentProcess(), library, name, sizeof(name));
        __NK_ASSERT_ALWAYS(len > 0 && len < maxNameLen);
        MODULEINFO info;
        r = gmiFunc(GetCurrentProcess(), library, &info, sizeof(info));
        __NK_ASSERT_ALWAYS(r);
        DEBUGPRINT("Module %s found at %08x to %08x", name, (TUint)info.lpBaseOfDll, (TUint)info.lpBaseOfDll + info.SizeOfImage);
        }

    r = FreeLibrary(psapiLibrary);
    __NK_ASSERT_ALWAYS(r);
    }

#endif  // DUMP_STACK_BACKTRACE

const TInt KWin32NonPreemptibleFunctionCount = 2;

struct TWin32FunctionInfo
    {
    TUint iStartAddr;
    TUint iLength;
    };

static TWin32FunctionInfo Win32NonPreemptibleFunctions[KWin32NonPreemptibleFunctionCount];

HMODULE GetFirstLoadedModuleHandleA(const char* aModuleName1, const char* aModuleName2)
    {
    HMODULE result = GetModuleHandleA(aModuleName1);
    return result ? result : GetModuleHandleA(aModuleName2);
    }

TWin32FunctionInfo Win32FindExportedFunction(const char* aFunctionName, ...)
    {
    va_list arg;
    va_start(arg, aFunctionName);
    HMODULE library = NULL;
    const char* libname;

    // Loop through the arguments until we find a library we can get a handle to.
    // The list of library names is NULL-terminated.
    while ((libname = va_arg(arg, const char*)) != NULL)
        {
        library = GetModuleHandleA(libname);
        if (library != NULL)
            break;
        }
    va_end(arg);

    // Make sure we did get a valid library
    __NK_ASSERT_ALWAYS(library != NULL);

    // Find the start address of the function
    TUint start = (TUint)GetProcAddress(library, aFunctionName);
    __NK_ASSERT_ALWAYS(start != 0);

    // Now have to check all other exports to find the end of the function
    TUint end = ~0u;
    for (TInt i = 1; ; ++i)
        {
        TUint addr = (TUint)GetProcAddress(library, MAKEINTRESOURCEA(i));
        if (!addr)
            break;
        if (addr > start && addr < end)
            end = addr;
        }
    __NK_ASSERT_ALWAYS(end != ~0u);
    TWin32FunctionInfo result = { start, end - start };

#ifdef DUMP_STACK_BACKTRACE
    DEBUGPRINT("Function %s found at %08x to %08x", aFunctionName, start, end);
#endif

    return result;
    }

void Win32FindNonPreemptibleFunctions()
    {
#ifdef DUMP_STACK_BACKTRACE
    PrintAllModuleInfo();
#endif

    TUint i = 0;
    Win32NonPreemptibleFunctions[i++] = Win32FindExportedFunction("RaiseException", "kernelbase.dll", "kernel32.dll", NULL);
    Win32NonPreemptibleFunctions[i++] = Win32FindExportedFunction("KiUserExceptionDispatcher", "ntdll.dll", NULL);
    __NK_ASSERT_ALWAYS(i == KWin32NonPreemptibleFunctionCount);
    }

TBool Win32IsThreadInNonPreemptibleFunction(HANDLE aWinThread, TLinAddr aStackTop)
    {
    const TInt KMaxSearchDepth = 16;            // 12 max observed while handling exceptions
    const TInt KMaxStackSize = 1024 * 1024;     // Default reserved stack size on Windows
    const TInt KMaxFrameSize = 4096;

    CONTEXT c;
    c.ContextFlags = CONTEXT_CONTROL;
    CheckedGetThreadContext(aWinThread, &c);
    TUint eip = c.Eip;
    TUint ebp = c.Ebp;
    TUint lastEbp = c.Esp;

#ifdef DUMP_STACK_BACKTRACE
    DEBUGPRINT("Stack backtrace for thread %x", aWinThread);
#endif

    // Walk the call stack
    for (TInt i = 0 ; i < KMaxSearchDepth ; ++i)
        {
#ifdef DUMP_STACK_BACKTRACE
        DEBUGPRINT(" %08x", eip);
#endif

        for (TInt j = 0 ; j < KWin32NonPreemptibleFunctionCount ; ++j)
            {
            const TWin32FunctionInfo& info = Win32NonPreemptibleFunctions[j];
            if (TUint(eip - info.iStartAddr) < info.iLength)
                {
                __KTRACE_OPT(KSCHED, DEBUGPRINT("Thread is in non-preemptible function %d at frame %d: eip == %08x", j, i, eip));
                return TRUE;
                }
            }

        // Check that the frame pointer is valid before dereferencing it
        if (TUint(aStackTop - ebp) > KMaxStackSize || TUint(ebp - lastEbp) > KMaxFrameSize || ebp & 3)
            break;

        TUint* frame = (TUint*)ebp;
        lastEbp = ebp;
        ebp = frame[0];
        eip = frame[1];
        }

    return FALSE;
    }

TBool NThread::IsSafeToPreempt()
    {
    return !Win32IsThreadInNonPreemptibleFunction(iWinThread, iUserStackBase);
    }