            {
            // round-robin deferred due to fast mutex held
            t->iHeldFastMutex->iWaiting = 1;
            return t;
            }

        t->iTime = t->iTimeslice;       // reset old thread time slice
        t = static_cast<NThreadBase*>(t->iNext);        // next thread
        aS.iQueue[t->iPriority] = t;        // make it first in list
        __KTRACE_OPT(KSCHED2, DEBUGPRINT("RoundRobin->%T", t));
        }

    if (t->iHeldFastMutex)
        {
        if (t->iHeldFastMutex == &aS.iLock)
            {
            // thread holds system lock: use it
            return t;
            }

        if ((t->i_ThrdAttr & KThreadAttImplicitSystemLock) != 0 && aS.iLock.iHoldingThread)
            t->iHeldFastMutex->iWaiting = 1;

        __NK_ASSERT_DEBUG((t->i_ThrdAttr & KThreadAttAddressSpace) == 0);
        /*
        Check for an address space change. Not implemented for Win32, but useful as
        documentation of the algorithm.

        if ((t->i_ThrdAttr & KThreadAttAddressSpace) != 0 && t->iAddressSpace != aS.iAddressSpace)
            t->iHeldFastMutex->iWaiting = 1;
        */
        }
    else if (t->iWaitFastMutex && t->iWaitFastMutex->iHoldingThread)
        {
        __KTRACE_OPT(KSCHED2, DEBUGPRINT("Resched inter->%T, Blocked on %M", t->iWaitFastMutex->iHoldingThread, t->iWaitFastMutex));
        t = t->iWaitFastMutex->iHoldingThread;
        }
    else if (t->i_ThrdAttr & KThreadAttImplicitSystemLock)
        {
        // implicit system lock required
        if (aS.iLock.iHoldingThread)
            {
            // system lock held, switch to that thread
            t = aS.iLock.iHoldingThread;
            __KTRACE_OPT(KSCHED2, DEBUGPRINT("Resched inter->%T (IMP SYS)", t));
            t->iHeldFastMutex->iWaiting = 1;    // aS.iLock.iWaiting = 1;
            return t;
            }

        __NK_ASSERT_DEBUG((t->i_ThrdAttr & KThreadAttAddressSpace) == 0);
        /*
        Check for an address space change. Not implemented for Win32, but useful as
        documentation of the algorithm.

        if ((t->i_ThrdAttr & KThreadAttAddressSpace) != 0 || t->iAddressSpace != aS.iAddressSpace)
            {
            // what do we do now?
            __NK_ASSERT_DEBUG(FALSE);
            }
        */
        }

    return t;
    }
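
// Note that the thread chosen above is not always the highest-priority ready thread:
// if that thread holds a fast mutex, or is blocked on one, the scheduler may hand
// control to the mutex-holding thread instead, so the mutex is released as soon as
// possible (a priority-inheritance-like hand-off, per the cases above).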

// from NThread
#undef i_ThrdAttr

// From here on it's all emulator (i.e. Win32) specific; there isn't any EPOC32 equivalent for most of it.
//
// The emulator uses one Win32 thread for each Symbian thread; these are the ones scheduled by the Symbian
// nanokernel in the algorithm above. Only one such thread will be running at a time; the others will be
// waiting on their individual scheduler locks, thus simulating a single-threaded architecture.
//
// In addition, there are some more Win32 threads used to handle timers, interrupts and the like. These
// are not under control of the Symbian scheduler. They are given higher priority than the Symbian threads,
// so they can run preemptively under control of the Win32 scheduler. However, they must call functions
// from the Win32Interrupt class before using any Symbian OS calls, so that the current Symbian thread can
// be suspended during the 'virtual interrupt'.
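//
// An illustrative sketch of that model (names as used in the code below):
//
//   Symbian threads:     T1 running | T2 waiting on T2.iScheduleLock | T3 Win32-suspended
//   'interrupt' threads: timers etc., higher Win32 priority, bracketing any Symbian
//                        OS calls with StartOfInterrupt()/EndOfInterrupt()
//
// A context switch from T1 to T2 signals T2's schedule lock and then waits on T1's,
// so at most one Symbian thread owns the virtual CPU at any time.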

static DWORD TlsIndex = TLS_OUT_OF_INDEXES;

void SchedulerInit(NThread& aInit)
//
// Initialise the win32 nKern scheduler
//
    {
    DWORD procaffin, sysaffin;
    if (GetProcessAffinityMask(GetCurrentProcess(), &procaffin, &sysaffin))
        {
        DWORD cpu;
        switch (Win32SingleCpu)
            {
        default:
            // bind the emulator to a nominated CPU on the host PC
            cpu = (1 << Win32SingleCpu);
            if (!(sysaffin & cpu))
                cpu = procaffin;    // CPU selection invalid
            break;

        case NThread::ECpuSingle:
            // bind the emulator to a single CPU on the host PC, pick one
            cpu = procaffin ^ (procaffin & (procaffin - 1));
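            // (bit trick: x & (x - 1) clears the lowest set bit of x, so the XOR
            // leaves exactly that bit; e.g. procaffin == 0x6 gives cpu == 0x2)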
            break;

        case NThread::ECpuAll:
            // run the emulator on all CPUs on the host PC
            cpu = sysaffin;
            break;
            }

        SetProcessAffinityMask(GetCurrentProcess(), cpu);
        }

    // identify whether we can use the atomic SignalObjectAndWait API in Win32 for rescheduling
    Win32AtomicSOAW = (SignalObjectAndWait(aInit.iScheduleLock, aInit.iScheduleLock, INFINITE, FALSE) == WAIT_OBJECT_0);
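    // (The probe above atomically signals the init thread's own schedule lock and
    // waits on it again; if the host completes this with WAIT_OBJECT_0, the atomic
    // path is used by SwitchThreads() below, otherwise it falls back to a separate
    // wake-then-wait pair.)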

    // allocate the TLS used for thread identification, and set it for the init thread
    TlsIndex = TlsAlloc();
    __NK_ASSERT_ALWAYS(TlsIndex != TLS_OUT_OF_INDEXES);
    SchedulerRegister(aInit);

    Win32FindNonPreemptibleFunctions();
    Interrupt.Init();
    }

void SchedulerRegister(NThread& aSelf)
    {
    TlsSetValue(TlsIndex, &aSelf);
    }

inline NThread* RunningThread()
// Returns the NThread actually running
    {
    if (TlsIndex == TLS_OUT_OF_INDEXES)
        return NULL;    // not yet initialised
    else
        return static_cast<NThread*>(TlsGetValue(TlsIndex));
    }

inline TBool IsScheduledThread()
// True if the NThread actually running is the scheduled one (not an interrupt thread or similar)
    {
    return RunningThread() == TheScheduler.iCurrentThread;
    }

inline NThread& CheckedCurrentThread()
// Returns the NThread actually running, checking that it's the scheduled one (not an interrupt thread or similar)
    {
    NThread* t = RunningThread();
    __NK_ASSERT_ALWAYS(t == TheScheduler.iCurrentThread);
    return *t;
    }
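
// These three helpers distinguish the Win32 thread that is physically executing from
// the Symbian thread the nanokernel considers current: RunningThread() is a raw TLS
// lookup (NULL for bare Win32 threads), IsScheduledThread() compares it against
// TheScheduler.iCurrentThread, and CheckedCurrentThread() asserts that they match.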

static void ThreadExit(NThread& aCurrent, NThread& aNext)
//
// The final context switch of a thread.
// Wake up the next thread and then destroy this one's Win32 resources.
//
// Return without terminating if we need to immediately reschedule again
// because we had to unlock the kernel but there are DFCs pending.
//
    {
    // the thread is dead
    // extract win32 handles from dying NThread object before rescheduling
    HANDLE sl = aCurrent.iScheduleLock;
    HANDLE th = aCurrent.iWinThread;

    // wake up the next thread; if we couldn't, return to reschedule again in this thread
    if (aNext.WakeUp())
        return;

    CloseHandle(sl);
    CloseHandle(th);
    ExitThread(0);  // does not return
    }

#ifdef MONITOR_THREAD_CPU_TIME
static inline void UpdateThreadCpuTime(NThread& aCurrent, NThread& aNext)
    {
    TUint32 timestamp = NKern::FastCounter();
    if (aCurrent.iLastStartTime)
        aCurrent.iTotalCpuTime += timestamp - aCurrent.iLastStartTime;
    aNext.iLastStartTime = timestamp;
    }
#else
static inline void UpdateThreadCpuTime(NThread& /*aCurrent*/, NThread& /*aNext*/)
    {
    }
#endif // MONITOR_THREAD_CPU_TIME

static void SwitchThreads(NThread& aCurrent, NThread& aNext)
//
// The fundamental context switch - wake up the next thread and wait for reschedule.
// Trivially this is aNext.WakeUp(); Wait(aCurrent.iScheduleLock), but we may be able
// to optimise the signal-and-wait.
//
    {
    __NK_ASSERT_ALWAYS(InterruptsStatus(ETrue));
    UpdateThreadCpuTime(aCurrent, aNext);

    if (aCurrent.iNState == NThread::EDead)
        {
        ThreadExit(aCurrent, aNext);
        // Yes, this is reachable!
        }
    else if (Win32AtomicSOAW && aNext.iWakeup == NThread::ERelease)
        {
        // special case optimization for normally scheduled threads using atomic Win32 primitive
        TheScheduler.iCurrentThread = &aNext;
        CheckedSignalObjectAndWait(aNext.iScheduleLock, aCurrent.iScheduleLock);
        }
    else if (aNext.WakeUp())
        {
        // We didn't wake the target thread; instead we need to re-reschedule in this thread
        __NK_ASSERT_ALWAYS(InterruptsStatus(EFalse));
        return;
        }
    else
        {
        // Target thread woken, now wait to be rescheduled
        CheckedWaitForSingleObject(aCurrent.iScheduleLock);
        }

    __NK_ASSERT_ALWAYS(InterruptsStatus(ETrue));
    }
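
// (When Win32AtomicSOAW is set, SignalObjectAndWait() wakes the next thread and blocks
// this one in a single Win32 call, i.e. one host kernel transition per context switch
// instead of the SetEvent() + WaitForSingleObject() pair used on the fallback path.)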

void TScheduler::Reschedule()
//
// Enter with kernel locked, exit with kernel unlocked, interrupts disabled.
// If the thread is dead do not return, but terminate the thread.
//
    {
    __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 1);
    NThread& me = *static_cast<NThread*>(TheScheduler.iCurrentThread);

    for (;;)
        {
        NKern::DisableAllInterrupts();
        if (TheScheduler.iDfcPendingFlag)
            TheScheduler.QueueDfcs();

        // Exit from this loop when further rescheduling is no longer needed
        if (!TheScheduler.iRescheduleNeededFlag)
            break;

        // Choose the next thread to run, using the Symbian scheduler
        TheScheduler.iRescheduleNeededFlag = FALSE;
        NKern::EnableAllInterrupts();
        NThread* t = static_cast<NThread*>(SelectThread(TheScheduler));
        __KTRACE_OPT(KSCHED, DEBUGPRINT("Reschedule->%T (%08x%08x)", t, TheScheduler.iPresent[1], TheScheduler.iPresent[0]));

#ifdef __EMI_SUPPORT__
        EMI_AddTaskSwitchEvent(&me, t);
        EMI_CheckDfcTag(t);
#endif
#ifdef BTRACE_CPU_USAGE
        if (TheScheduler.iCpuUsageFilter)
            TheScheduler.iBTraceHandler(BTRACE_HEADER_C(4, BTrace::ECpuUsage, BTrace::ENewThreadContext), 0, (TUint32)t, 0, 0, 0, 0, 0);
#endif

        // SwitchThreads() can return immediately, if it turns out that another reschedule is
        // necessary; otherwise, this thread will be descheduled in favour of the one selected
        // above, and SwitchThreads() will only return when this thread is next selected
        SwitchThreads(me, *t);

        // When we start again, we should check for being forced to exit; otherwise go round the
        // loop again to see whether another reschedule is called for (e.g. if there are new DFCs).
        NThread::TDivert divertToExit = me.iDivertFn;
        me.iDivertFn = NULL;
        if (divertToExit)
            divertToExit();
        }

    // interrupts are disabled, the kernel is still locked
    if (TheScheduler.iProcessHandler)
        (*ProcessHandler(TheScheduler.iProcessHandler))(me.iAddressSpace);  // thread will need to have its static data updated

    __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 1);
    TheScheduler.iKernCSLocked = 0;
    }

void TScheduler::YieldTo(NThreadBase*)
//
// Directed context switch to the nominated thread.
// Enter with kernel locked, exit with kernel unlocked but interrupts disabled.
//
    {
    RescheduleNeeded();
    TScheduler::Reschedule();
    }

TBool NThread::WakeUp()
//
// Wake up the thread. What to do depends on whether it was preempted or voluntarily
// rescheduled.
//
// On entry, the kernel is locked, and interrupts may be enabled or disabled.
//
// The return value is TRUE if the caller should immediately reschedule again because we
// needed to unlock the kernel in order to resume the thread but there were DFCs pending.
// In this case, the thread is not woken, the kernel remains locked, and the return is
// made with interrupts disabled (whether or not they were on entry).
//
// Otherwise, the target thread is woken up (in any of several different ways), and
// the return value is FALSE. In that case the interrupt status is unchanged; and the
// kernel may or may not still be locked.
//
    {
    __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked > 0);
    __NK_ASSERT_ALWAYS(RunningThread() != this);    // Can't wake self!

    switch (iWakeup)
        {
    default:
        FAULT();

    case EIdle:
        // The thread is waiting on its scheduler lock, in Idle()
        __NK_ASSERT_ALWAYS(TheScheduler.iCurrentThread == this);
        CheckedSetEvent(iScheduleLock);
        break;

    case ERelease:
        // The thread is waiting on its scheduler lock
        TheScheduler.iCurrentThread = this;
        CheckedSetEvent(iScheduleLock);
        break;

    case EResumeLocked:
        // The thread is Win32 suspended and must be resumed.
        //
        // A newly created thread does not need the kernel unlocked so we can
        // just resume it; OTOH it will need to have its static data updated ...
        //
        __KTRACE_OPT(KSCHED, DEBUGPRINT("Win32ResumeLocked->%T", this));
        iWakeup = ERelease;
        TheScheduler.iCurrentThread = this;
        if (TheScheduler.iProcessHandler)
            (*ProcessHandler(TheScheduler.iProcessHandler))(iAddressSpace);
        CheckedResumeThread(iWinThread);
        break;

    case EResumeDiverted:
        // The thread is Win32 suspended and must be resumed.
        //
        // It does not need the kernel unlocked, but does have a diversion pending. We
        // know it's safe to divert the thread here because we called IsSafeToPreempt()
        // when we suspended it - otherwise the diversion could get lost.
        //
        __KTRACE_OPT(KSCHED, DEBUGPRINT("Win32Resume->%T (Resuming diverted thread)", this));
        iWakeup = ERelease;
        TheScheduler.iCurrentThread = this;
        ApplyDiversion();
        CheckedResumeThread(iWinThread, ETrue);
        break;

    case EResume:
        // The thread is Win32 suspended and must be resumed.
        //
        // The complication here is that we have to unlock the kernel on behalf of the
        // pre-empted thread. Before doing so, we have to check whether there are DFCs
        // or a reschedule pending; if so, we don't unlock the kernel or wake the target
        // thread, but instead return TRUE, so that our caller (usually SwitchThreads()
        // above) knows to return and go round the TScheduler::Reschedule() loop again.
        //
        TInt irq = NKern::DisableAllInterrupts();
        if (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag)
            {
            __KTRACE_OPT(KSCHED, DEBUGPRINT("Win32Resume->%T preempted", this));
            TheScheduler.iRescheduleNeededFlag = TRUE;  // ensure we do the reschedule
            return TRUE;
            }

        // Otherwise we mark the thread as not-preempted, unlock the kernel, restore
        // interrupts, and resume the thread.
        __KTRACE_OPT(KSCHED, DEBUGPRINT("Win32Resume->%T", this));
        iWakeup = ERelease;
        TheScheduler.iCurrentThread = this;
        if (TheScheduler.iProcessHandler)
            (*ProcessHandler(TheScheduler.iProcessHandler))(iAddressSpace); // threads resumed after interrupt or locks need to have static data updated
        TheScheduler.iKernCSLocked = 0;

        // If there are callbacks waiting, and the thread is in user mode, divert it to
        // pick up its callbacks (we know this is safe because we called IsSafeToPreempt()
        // when we suspended it - otherwise the diversion could get lost).
        if (iUserModeCallbacks != NULL && !iInKernel)
            {
            TheScheduler.iKernCSLocked = 1; // prevent further pre-emption
            ApplyDiversion();
            }

        // If pre-emption occurs before the thread is resumed, it is the new thread that
        // is pre-empted, not the running thread, so we are guaranteed to be able to call
        // ResumeThread. If pre-emption occurs, and we are rescheduled to run before that
        // occurs, we will once again be running with the kernel locked and the other
        // thread will have been re-suspended by Win32: so all is well.
        //
        NKern::RestoreInterrupts(irq);
        CheckedResumeThread(iWinThread);
        break;
        }

    return FALSE;
    }
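
// (Summary of the iWakeup states handled above: EIdle and ERelease threads are parked
// on their schedule-lock event and are woken by signalling it; the EResume* states
// cover threads that were Win32-suspended by a virtual interrupt and must be resumed
// with ResumeThread() -- EResume additionally unlocking the kernel on their behalf.)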

/** Put the emulator into 'idle'.
    This is called by the idle thread when there is nothing else to do.

    @internalTechnology
*/
EXPORT_C void NKern::Idle()
//
// enter and exit with kernel locked
//
    {
    NThread& me = *static_cast<NThread*>(TheScheduler.iCurrentThread);
    me.iWakeup = EIdle;
    CheckedWaitForSingleObject(me.iScheduleLock);

    // something happened, and we've been prodded by an interrupt
    // the kernel was locked by the interrupt, and now reschedule
    me.iWakeup = ERelease;
    TScheduler::Reschedule();
    NKern::EnableAllInterrupts();
    }

void EnterKernel(TBool aDiversion)
    {
    NThread& t = CheckedCurrentThread();
    volatile TInt& inKernel = t.iInKernel;
    __NK_ASSERT_DEBUG(inKernel >= 0);

    // This code has to be re-entrant, because a thread that's in the process
    // of entering the kernel may be preempted; then if it isn't yet marked
    // as 'in the kernel' it can be diverted through EnterKernel()/LeaveKernel()
    // in order to execute user-mode callbacks. However this is all in the
    // same thread context, so it doesn't need any special synchronisation.
    // The moment of 'entering' the kernel is deemed to occur when the new value
    // of iInKernel is written back to the NThread object.
    if (inKernel++ == 0)
        {
        // preamble when coming from userspace
        __NK_ASSERT_ALWAYS(InterruptsStatus(ETrue));
        __NK_ASSERT_ALWAYS(t.iHeldFastMutex == 0);
        if (aDiversion)
            {
            // Forced entry, to make thread exit or run user-mode callbacks
            // If exiting, iCsCount will have been set to 1 to prevent preemption
            // Otherwise it must be 0, as in the non-diversion case
            __NK_ASSERT_ALWAYS(t.iCsCount <= 1);
            __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 1);
            }
        else
            {
            __NK_ASSERT_ALWAYS(t.iCsCount == 0);
            __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 0);
            }
        }
    }

void LeaveKernel()
    {
    NThread& t = CheckedCurrentThread();
    volatile TInt& inKernel = t.iInKernel;
    __NK_ASSERT_DEBUG(inKernel > 0);

    // This code has to be re-entrant, because a thread that's in the process
    // of leaving the kernel may be preempted; then if it isn't still marked
    // as 'in the kernel' it can be diverted through EnterKernel()/LeaveKernel()
    // in order to execute user-mode callbacks. However this is all in the
    // same thread context, so it doesn't need any special synchronisation.
    // The moment of 'leaving' the kernel is deemed to occur when the new value
    // of iInKernel is written back to the NThread object.
    if (inKernel == 1)
        {
        // postamble when about to return to userspace
        __NK_ASSERT_ALWAYS(t.iCsCount == 0);
        __NK_ASSERT_ALWAYS(t.iHeldFastMutex == 0);
        __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 0);
        NKern::DisableAllInterrupts();
        t.CallUserModeCallbacks();
        NKern::EnableAllInterrupts();
        }

    inKernel -= 1;
    }
424 __NK_ASSERT_ALWAYS(t == TheScheduler.iCurrentThread); |
554 /** Locks the kernel and returns a pointer to the current thread |
425 return *t; |
555 Increments iKernCSLocked, thereby deferring IDFCs and preemption. |
426 } |
556 |
427 |
557 @pre Call either in a thread or an IDFC context. |
428 |
558 @pre Do not call from an ISR. |
429 /** Disable normal 'interrupts'. |
559 @pre Do not call from bare Win32 threads. |
430 |
|
431 @param aLevel Ignored |
|
432 @return Cookie to be passed into RestoreInterrupts() |
|
433 */ |
560 */ |
434 EXPORT_C TInt NKern::DisableInterrupts(TInt /*aLevel*/) |
561 EXPORT_C NThread* NKern::LockC() |
435 { |
562 { |
436 return Interrupt.Mask(); |
563 CHECK_PRECONDITIONS(MASK_NOT_ISR, "NKern::LockC"); |
437 } |
564 __ASSERT_WITH_MESSAGE_ALWAYS(IsScheduledThread(), "Do not call from bare Win32 threads", "NKern::LockC"); // check that we are a scheduled thread |
438 |
565 ++TheScheduler.iKernCSLocked; |
439 |
566 return (NThread*)TheScheduler.iCurrentThread; |
440 /** Disable all maskable 'interrupts'. |
567 } |
441 |
568 |
442 @return Cookie to be passed into RestoreInterrupts() |
569 /** Locks the kernel. |
|
570 |
|
571 Increments iKernCSLocked, thereby deferring IDFCs and preemption. |
|
572 |
|
573 @pre Call either in a thread or an IDFC context. |
|
574 @pre Do not call from an ISR. |
|
575 @pre Do not call from bare Win32 threads. |
443 */ |
576 */ |
444 EXPORT_C TInt NKern::DisableAllInterrupts() |
577 EXPORT_C void NKern::Lock() |
445 { |
578 { |
446 return Interrupt.Mask(); |
579 CHECK_PRECONDITIONS(MASK_NOT_ISR, "NKern::Lock"); |
447 } |
580 __ASSERT_WITH_MESSAGE_ALWAYS(IsScheduledThread(), "Do not call from bare Win32 threads", "NKern::Lock"); // check that we are a scheduled thread |
448 |
581 ++TheScheduler.iKernCSLocked; |
449 |
582 } |
450 /** Enable all maskable 'interrupts' |
|
451 |
|
452 @internalComponent |
|
453 */ |
|
454 EXPORT_C void NKern::EnableAllInterrupts() |
|
455 { |
|
456 Interrupt.Restore(0); |
|
457 } |
|
458 |
|
459 |
|
460 /** Restore interrupt mask to state preceding a DisableInterrupts() call |
|
461 |
|
462 @param aLevel Cookie returned by Disable(All)Interrupts() |
|
463 */ |
|
464 EXPORT_C void NKern::RestoreInterrupts(TInt aLevel) |
|
465 { |
|
466 Interrupt.Restore(aLevel); |
|
467 } |
|
468 |
|
469 |
583 |
470 /** Unlocks the kernel. |
584 /** Unlocks the kernel. |
471 |
585 |
472 Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are |
586 Decrements iKernCSLocked; if it would become zero and IDFCs or a reschedule are |
473 pending, calls the scheduler to process them. |
587 pending, calls the scheduler to process them. |
474 |
588 |
475 @pre Call either in a thread or an IDFC context. |
589 @pre Call either in a thread or an IDFC context. |
476 @pre Do not call from an ISR. |
590 @pre Do not call from an ISR. |
477 @pre Do not call from bare Win32 threads. |
591 @pre Do not call from bare Win32 threads. |
478 */ |
592 */ |
479 EXPORT_C void NKern::Unlock() |
593 EXPORT_C void NKern::Unlock() |
480 // |
594 { |
481 // using this coding sequence it is possible to call Reschedule unnecessarily |
595 // check that the caller is the scheduled thread |
482 // if we are preempted after testing the flags (lock is zero at this point). |
596 __ASSERT_WITH_MESSAGE_DEBUG(IsScheduledThread(), "Do not call from bare Win32 threads", "NKern::Unlock"); |
483 // However, in the common case this is much faster because 'disabling interrupts' |
597 CHECK_PRECONDITIONS(MASK_NOT_ISR, "NKern::Unlock"); |
484 // can be very expensive. |
|
485 // |
|
486 { |
|
487 CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::Unlock"); |
|
488 __ASSERT_WITH_MESSAGE_DEBUG(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::Unlock"); // check that we are a scheduled thread |
|
489 __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked > 0); // Can't unlock if it isn't locked! |
598 __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked > 0); // Can't unlock if it isn't locked! |
490 if (--TheScheduler.iKernCSLocked == 0) |
599 |
491 { |
600 // Rather than decrementing the lock before testing the flags, and then |
492 if (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag) |
601 // re-incrementing it in order to call Reschedule() -- which would |
493 { |
602 // leave a window for preemption -- we can test the flags first, and then |
494 TheScheduler.iKernCSLocked = 1; |
603 // see whether the lock count is 1 ... |
495 TScheduler::Reschedule(); |
604 if ((TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag) && |
496 NKern::EnableAllInterrupts(); |
605 TheScheduler.iKernCSLocked == 1) |
497 } |
606 { |
498 } |
607 // Reschedule() returns with the kernel unlocked, but interrupts disabled |
499 } |
608 TScheduler::Reschedule(); |
500 |
609 NKern::EnableAllInterrupts(); |
501 |
610 } |
502 /** Locks the kernel. |
611 else |
503 |
612 { |
504 Increments iKernCSLocked, thereby deferring IDFCs and preemption. |
613 // All other cases - just decrement the lock count |
505 |
614 TheScheduler.iKernCSLocked -= 1; |
506 @pre Call either in a thread or an IDFC context. |
615 } |
507 @pre Do not call from an ISR. |
|
508 @pre Do not call from bare Win32 threads. |
|
509 */ |
|
510 EXPORT_C void NKern::Lock() |
|
511 { |
|
512 CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::Lock"); |
|
513 __ASSERT_WITH_MESSAGE_ALWAYS(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::Lock"); // check that we are a scheduled thread |
|
514 ++TheScheduler.iKernCSLocked; |
|
515 } |
|
516 |
|
517 |
|
518 /** Locks the kernel and returns a pointer to the current thread |
|
519 Increments iKernCSLocked, thereby deferring IDFCs and preemption. |
|
520 |
|
521 @pre Call either in a thread or an IDFC context. |
|
522 @pre Do not call from an ISR. |
|
523 @pre Do not call from bare Win32 threads. |
|
524 */ |
|
525 EXPORT_C NThread* NKern::LockC() |
|
526 { |
|
527 CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::Lock"); |
|
528 __ASSERT_WITH_MESSAGE_ALWAYS(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::Lock"); // check that we are a scheduled thread |
|
529 ++TheScheduler.iKernCSLocked; |
|
530 return (NThread*)TheScheduler.iCurrentThread; |
|
531 } |
616 } |
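
// (Typical usage, for illustration only:
//    NKern::Lock();      // defer IDFCs and preemption
//    ...                 // manipulate scheduler or thread state
//    NKern::Unlock();    // pending IDFCs/reschedules are processed here
// Lock()/Unlock() pairs may nest; only the outermost Unlock() can reschedule.)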

/** Allows IDFCs and rescheduling if they are pending.

    If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1
    calls the scheduler to process the IDFCs and possibly reschedule.

    @return Nonzero if a reschedule actually occurred, zero if not.

    @pre Call either in a thread or an IDFC context.
    @pre Do not call from an ISR.
    @pre Do not call from bare Win32 threads.
*/
EXPORT_C TInt NKern::PreemptionPoint()
    {
    // check that the caller is the scheduled thread
    __ASSERT_WITH_MESSAGE_DEBUG(IsScheduledThread(), "Do not call from bare Win32 threads", "NKern::PreemptionPoint");
    CHECK_PRECONDITIONS(MASK_NOT_ISR, "NKern::PreemptionPoint");

    if ((TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag) &&
            TheScheduler.iKernCSLocked == 1)
        {
        // Reschedule() returns with the kernel unlocked, but interrupts disabled
        TScheduler::Reschedule();
        TheScheduler.iKernCSLocked = 1;
        NKern::EnableAllInterrupts();
        return TRUE;
        }

    return FALSE;
    }

/** Return the current processor context type
    (thread, IDFC, interrupt or escaped thread).

    @return A value from NKern::TContext enumeration (including EEscaped)
    @pre Any context

    @see NKern::TContext
*/
EXPORT_C TInt NKern::CurrentContext()
    {
    NThread* t = RunningThread();

    if (!t)
        return NKern::EInterrupt;

    if (TheScheduler.iInIDFC)
        return NKern::EIDFC;

    if (t->iWakeup == NThread::EEscaped)
        return NKern::EEscaped;

    __NK_ASSERT_ALWAYS(NKern::Crashed() || t == TheScheduler.iCurrentThread);
    return NKern::EThread;
    }
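
// (EEscaped is emulator-specific: it denotes a Symbian thread that has temporarily
// 'escaped' from the nanokernel scheduler, e.g. to make a blocking Win32 call.)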

/** Disable normal 'interrupts'.

    @param aLevel Ignored
    @pre Call in a Symbian (thread, IDFC, ISR) context.
    @pre Do not call from bare Win32 threads.
    @return Cookie to be passed into RestoreInterrupts()
*/
EXPORT_C TInt NKern::DisableInterrupts(TInt /*aLevel*/)
    {
    return Interrupt.MaskInterrupts(EFalse);
    }

/** Restore interrupt mask to state preceding a DisableInterrupts() call.

    @param aLevel Cookie returned by Disable(All)Interrupts()
    @pre Call in a Symbian (thread, IDFC, ISR) context.
    @pre Do not call from bare Win32 threads.
*/
EXPORT_C void NKern::RestoreInterrupts(TInt aLevel)
    {
    Interrupt.RestoreInterruptMask(aLevel);
    }

/** Disable all maskable 'interrupts'.

    @pre Call in a Symbian (thread, IDFC, ISR) context.
    @pre Do not call from bare Win32 threads.
    @return Cookie to be passed into RestoreInterrupts()
*/
EXPORT_C TInt NKern::DisableAllInterrupts()
    {
    return Interrupt.MaskInterrupts(EFalse);
    }

/** Enable all maskable 'interrupts'.

    @internalComponent
    @pre Call in a Symbian (thread, IDFC, ISR) context.
    @pre Do not call from bare Win32 threads.
*/
EXPORT_C void NKern::EnableAllInterrupts()
    {
    Interrupt.RestoreInterruptMask(0);
    }

/** Mark the start of an 'interrupt' in the Win32 emulator.
    This must be called in interrupt threads before using any other kernel APIs,
    and should be paired with a call to EndOfInterrupt().

    @pre Win32 'interrupt' thread context
*/
EXPORT_C void StartOfInterrupt()
    {
    // check that the caller is not a scheduled thread
    __ASSERT_WITH_MESSAGE_DEBUG(!IsScheduledThread(), "Win32 'interrupt' thread context", "StartOfInterrupt");
    Interrupt.BeginInterrupt();
    }

/** Mark the end of an 'interrupt' in the Win32 emulator.
    This checks to see if we need to reschedule.

    @pre Win32 'interrupt' thread context
*/
EXPORT_C void EndOfInterrupt()
    {
    // check that the caller is not a scheduled thread
    __ASSERT_WITH_MESSAGE_DEBUG(!IsScheduledThread(), "Win32 'interrupt' thread context", "EndOfInterrupt");
    Interrupt.EndInterrupt();
    }


// The Win32Interrupt class manages virtual interrupts from Win32 event threads

void Win32Interrupt::Init()
    {
    InitializeCriticalSection(&iCS);
    iQ = CreateSemaphoreA(NULL, 0, KMaxTInt, NULL);
    __NK_ASSERT_ALWAYS(iQ);

    // create the NThread which exists solely to service reschedules for interrupts;
    // this makes EndInterrupt() much simpler as it merely needs to kick this thread
    SNThreadCreateInfo ni;
    memclr(&ni, sizeof(ni));
    ni.iFunction = &SchedulerThreadFunction;
    ni.iTimeslice = -1;
    ni.iPriority = 1;
    NKern::ThreadCreate(&iScheduler, ni);
    NKern::Lock();
    TScheduler::YieldTo(&iScheduler);
    RestoreInterruptMask(0);
    }

void Win32Interrupt::BeginInterrupt()
    {
    __NK_ASSERT_ALWAYS(!IsScheduledThread());   // check that we aren't a scheduled thread
    MaskInterrupts(ETrue);  // suspend scheduled thread and set mask
#ifdef BTRACE_CPU_USAGE
    BTrace0(BTrace::ECpuUsage, BTrace::EIrqStart);
#endif
    }

void Win32Interrupt::EndInterrupt()
    {
    NThread* pC = iInterrupted;
    iInterrupted = 0;
    __NK_ASSERT_ALWAYS(pC == TheScheduler.iCurrentThread);  // unchanged since BeginInterrupt()
    __NK_ASSERT_ALWAYS(!IsScheduledThread());   // check that we aren't a scheduled thread
    __NK_ASSERT_ALWAYS(iOwner == GetCurrentThreadId());     // check we are the interrupting thread
    __NK_ASSERT_ALWAYS(InterruptsStatus(EFalse));
    __NK_ASSERT_ALWAYS(iLevel == 1);    // DSG: is this correct?

    if (TheScheduler.iKernCSLocked)
        {
        // No rescheduling allowed; just resume the interrupted thread
        NKern::EnableAllInterrupts();
        CheckedResumeThread(pC->iWinThread);
        return;
        }

    __NK_ASSERT_ALWAYS(iLevel == 1);    // DSG: is this correct?
    __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 0);

    TBool diversionUnsafe = EFalse;     // Optimistic assumption until checked
    if (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag)
        {
        switch (pC->iWakeup)
            {
        default:
            FAULT();

        case NThread::EIdle:
            // wake up the Idle thread, it will always reschedule immediately
            TheScheduler.iKernCSLocked = 1;     // prevent further pre-emption
            if (pC->WakeUp())
                FAULT();    // this can't happen
            NKern::EnableAllInterrupts();
            CheckedResumeThread(pC->iWinThread);
            return;

        case NThread::ERelease:
            if (pC->IsSafeToPreempt())
                {
                // pre-empt the current thread and poke the 'scheduler' thread
                UpdateThreadCpuTime(*pC, iScheduler);
                pC->iWakeup = NThread::EResume;     // how to wake this thread later
                TheScheduler.iKernCSLocked = 1;     // prevent further pre-emption
                RescheduleNeeded();
                NKern::EnableAllInterrupts();
                if (iScheduler.WakeUp())
                    FAULT();    // this can't happen
                return;
                }

            diversionUnsafe = ETrue;    // don't consider diverting
            break;
            }
        }

#ifdef BTRACE_CPU_USAGE
    // no thread reschedule, so emit trace...
    BTrace0(BTrace::ECpuUsage, BTrace::EIrqEnd);
#endif

    // If there are callbacks waiting, and the thread is in user mode, and it's at a
    // point where it can safely be preempted, then divert it to pick up its callbacks
    if (pC->iUserModeCallbacks != NULL && !pC->iInKernel && !diversionUnsafe)
        if (pC->IsSafeToPreempt())
            {
            TheScheduler.iKernCSLocked = 1;
            pC->ApplyDiversion();
            }

    NKern::EnableAllInterrupts();
    CheckedResumeThread(pC->iWinThread);
    }

TInt Win32Interrupt::MaskInterrupts(TBool aPreempt)
    {
    if (!iQ)
        return 0;   // interrupt scheme not enabled yet

    EnterCriticalSection(&iCS);     // Win32 critical section, not a Symbian one

    DWORD id = GetCurrentThreadId();
    if (iOwner == id)
        {
        // The easiest case: we already own the mask, so just increment the level.
        // The requirement for rescheduling on exit is unaffected.
        __NK_ASSERT_ALWAYS(!aPreempt);
        TInt r = iLevel++;
        LeaveCriticalSection(&iCS);
        return r;
        }

    if (!iOwner && !aPreempt)
        {
        // Another easy case; we've been called from a Symbian thread, and there's
        // no contention, so we can just take ownership of the interrupt mask. No
        // rescheduling is required on exit (but this may change) ...
        __NK_ASSERT_ALWAYS(iLevel == 0);
        TInt r = iLevel++;
        iOwner = id;
        iRescheduleOnExit = EFalse;
        LeaveCriticalSection(&iCS);
        return r;
        }

    if (iOwner)
        {
        // Someone else owns it; if we've been called from an interrupt thread,
        // this could be another interrupt thread or a Symbian thread. If we're
        // being called from a Symbian thread, the owner must be another Symbian
        // thread, because a Symbian thread can't preempt an interrupt thread.
        //
        // In either case, we can increment the count of waiters, then wait for the
        // current holder to release it. Note that another (interrupt) thread could
        // also do this, and then the order in which they get to run is undefined.
        iWaiting += 1;

        do
            {
            __NK_ASSERT_ALWAYS(iWaiting > 0);
            LeaveCriticalSection(&iCS);
            CheckedWaitForSingleObject(iQ);
            EnterCriticalSection(&iCS);
            __NK_ASSERT_ALWAYS(iWaiting > 0);
            }
        while (iOwner);

        iWaiting -= 1;
        iRescheduleOnExit = IsScheduledThread() && (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag);
        }

    // Nobody now controls the interrupt mask ...
    __NK_ASSERT_ALWAYS(iOwner == 0 && iLevel == 0);

    if (aPreempt)
        {
        // ... but in this case, we've been called from an interrupt thread and
        // a Symbian thread may still be running -- yes, even though all emulator
        // threads are normally bound to a single CPU!
        //
        // To ensure that such a thread doesn't see an inconsistent state, we
        // have to suspend it before we actually take ownership, as it could
        // examine the interrupt state at any time, without taking any locks.

        __NK_ASSERT_ALWAYS(iInterrupted == 0);  // we haven't done this already
        NThread* pC;
        for (;;)
            {
            pC = static_cast<NThread*>(TheScheduler.iCurrentThread);
            CheckedSuspendThread(pC->iWinThread);
            if (pC == TheScheduler.iCurrentThread)
                break;  // no change of thread, so ok to proceed

            // We suspended the thread while doing a (Symbian) context switch!
            // The scheduler state might be inconsistent if we left it like that,
            // so instead we'll resume it, then try again ...
            CheckedResumeThread(pC->iWinThread);
            }

        __NK_ASSERT_ALWAYS(iInterrupted == 0);
        iInterrupted = pC;
        }

    // Now we can assert ownership of the interrupt mask.
    __NK_ASSERT_ALWAYS(iOwner == 0 && iLevel == 0);
    TInt r = iLevel++;
    iOwner = id;
    LeaveCriticalSection(&iCS);
    return r;
    }
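
// (So MaskInterrupts() has three outcomes: a recursive call by the current owner just
// bumps iLevel; an uncontended call from a Symbian thread takes ownership directly;
// a contended call queues on the iQ semaphore. With aPreempt set, the caller is an
// interrupt thread, and the running Symbian thread is also Win32-suspended and noted
// in iInterrupted so that EndInterrupt() can resume it.)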
948 |
|
949 void Win32Interrupt::RestoreInterruptMask(TInt aLevel) |
626 { |
950 { |
627 if (!iQ) |
951 if (!iQ) |
628 return; // interrupt scheme not enabled yet |
952 return; // interrupt scheme not enabled yet |
629 DWORD id=GetCurrentThreadId(); |
953 |
|
954 DWORD id = GetCurrentThreadId(); |
|
955 EnterCriticalSection(&iCS); // Win32 critical section, not a Symbian one |
|
956 |
630 for (;;) |
957 for (;;) |
631 { |
958 { |
632 __NK_ASSERT_ALWAYS(id == iOwner); |
959 __NK_ASSERT_ALWAYS(id == iOwner); // only the current owner may do this |
633 TInt count = iLevel - aLevel; |
960 TInt count = iLevel - aLevel; |
634 if (count <= 0) |
961 if (count <= 0) |
635 return; // alredy restored to that level |
962 break; // already restored to that level |
636 TBool reschedule = FALSE; |
963 |
637 iLevel = aLevel; // update this value before releasing the lock |
964 iLevel = aLevel; // update the recursion level first |
638 if (aLevel == 0) |
965 if (aLevel > 0) |
639 { |
966 { |
640 // we release the lock |
967 // The easiest case: we're still holding ownership, so there's nothing to do |
641 iOwner = 0; |
968 break; |
642 if (iRescheduleOnExit && TheScheduler.iKernCSLocked == 0) |
969 } |
643 reschedule = TRUE; // need to trigger reschedule on full release |
970 |
644 } |
971 iOwner = 0; // give up ownership |
645 // now release the lock |
972 if (iWaiting) |
646 if (__e32_atomic_add_ord32(&iLock, TUint32(-count)) == (TUint32)count) |
973 { |
647 { // fully released, check for reschedule |
974 // Someone else is waiting for control of the interrupt mask. |
648 if (!reschedule) |
975 // They may preempt us as soon as we exit the critical section |
649 return; |
976 // (at the end of this function) |
650 } |
977 CheckedReleaseSemaphore(iQ); |
651 else |
978 break; |
652 { // not fully released |
979 } |
653 if (aLevel == 0) |
980 |
654 __NK_ASSERT_ALWAYS(ReleaseSemaphore(iQ,1,NULL)); |
981 // Lock fully released, no-one waiting, so see whether we need to reschedule |
655 return; |
982 if (TheScheduler.iKernCSLocked || !iRescheduleOnExit) |
656 } |
983 break; |
657 // unlocked everything but a reschedule may be required |
984 |
|
985 // Interrupt mask fully unlocked, but reschedule required ... |
658 TheScheduler.iKernCSLocked = 1; |
986 TheScheduler.iKernCSLocked = 1; |
|
987 LeaveCriticalSection(&iCS); |
659 TScheduler::Reschedule(); |
988 TScheduler::Reschedule(); |
660 // return with the kernel unlocked, but interrupts disabled |
989 EnterCriticalSection(&iCS); |
661 // instead of going recursive with a call to EnableAllInterrupts() we iterate |
990 |
662 aLevel=0; |
991 // Note: TScheduler::Reschedule() above calls MaskInterrupts() -- which changes |
663 } |
992 // the state of most of our member data. It returns with the kernel unlocked, |
664 } |
993 // but interrupts still disabled. Hence we will have reacquired ownership of the |
665 |
994 // interrupt mask, and must release it again. Instead of going recursive with a |
666 void Win32Interrupt::Begin() |
995 // call to EnableAllInterrupts() we iterate; we'll get out of this loop eventually, |
667 { |
996 // because iRescheduleOnExit is updated by MaskInterrupts() ... |
668 Mask(); |
997 aLevel = 0; |
669 __NK_ASSERT_ALWAYS(iInterrupted==0); // check we haven't done this already |
998 } |
670 __NK_ASSERT_ALWAYS(!IsScheduledThread()); // check that we aren't a scheduled thread |
999 |
671 NThread* pC; |
1000 LeaveCriticalSection(&iCS); |
672 for (;;) |
1001 } |
673 { |
1002 |
674 pC=static_cast<NThread*>(TheScheduler.iCurrentThread); |
1003 void Win32Interrupt::ForceReschedule() |
675 DWORD r=SuspendThread(pC->iWinThread); |
1004 { |
676 if (pC == TheScheduler.iCurrentThread) |
1005 RescheduleNeeded(); |
677 { |
1006 if (iScheduler.WakeUp()) |
678 // there was no race while suspending the thread, so we can carry on |
1007 FAULT(); // this can't happen |
679 __NK_ASSERT_ALWAYS(r != 0xffffffff); |
1008 } |
680 break; |
1009 |
681 } |
1010 void Win32Interrupt::SchedulerThreadFunction(TAny*) |
682 // We suspended the thread while doing a context switch; resume it and try again |
|
683 if (r != 0xffffffff) |
|
684 __NK_ASSERT_ALWAYS(TInt(ResumeThread(pC->iWinThread)) > 0); // check thread was previously suspended |
|
685 } |
|
686 #ifdef BTRACE_CPU_USAGE |
|
687 BTrace0(BTrace::ECpuUsage,BTrace::EIrqStart); |
|
688 #endif |
|
689 iInterrupted = pC; |
|
690 } |
|
691 |
|
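
The loop in Begin() above deals with a genuine Win32 subtlety: SuspendThread() is asynchronous, so between reading TheScheduler.iCurrentThread and the suspension taking effect the scheduler may have switched threads. The idiom is to suspend, re-read, and retry on a mismatch. A minimal sketch of the same pattern outside the emulator (gCurrentThread is a hypothetical stand-in for TheScheduler.iCurrentThread):

    #include <windows.h>

    extern HANDLE volatile gCurrentThread;  // hypothetical shared "current thread"

    // Suspend whichever thread is current, retrying if a context switch
    // sneaks in between sampling the pointer and the suspend taking effect.
    HANDLE SuspendCurrentThread()
        {
        for (;;)
            {
            HANDLE target = gCurrentThread;     // sample
            DWORD r = SuspendThread(target);    // returns 0xffffffff on failure
            if (target == gCurrentThread)
                return (r == 0xffffffff) ? NULL : target;   // no race: done
            if (r != 0xffffffff)                // raced: undo and retry
                ResumeThread(target);
            }
        }
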
692 void Win32Interrupt::End() |
|
693 { |
|
694 __NK_ASSERT_ALWAYS(iOwner == GetCurrentThreadId()); // check we are the interrupting thread |
|
695 NThread* pC = iInterrupted; |
|
696 __NK_ASSERT_ALWAYS(pC==TheScheduler.iCurrentThread); |
|
697 iInterrupted = 0; |
|
698 if (iLock == 1 && TheScheduler.iKernCSLocked == 0 && |
|
699 (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag) && |
|
700 pC->IsSafeToPreempt()) |
|
701 { |
|
702 TheScheduler.iKernCSLocked = 1; // prevent further pre-emption |
|
703 if (pC->iWakeup == NThread::EIdle) |
|
704 { |
|
705 // wake up the NULL thread; it will always reschedule immediately |
|
706 pC->WakeUp(); |
|
707 } |
|
708 else |
|
709 { |
|
710 // pre-empt the current thread and poke the 'scheduler' thread |
|
711 __NK_ASSERT_ALWAYS(pC->iWakeup == NThread::ERelease); |
|
712 pC->iWakeup = NThread::EResume; |
|
713 UpdateThreadCpuTime(*pC, iScheduler); |
|
714 RescheduleNeeded(); |
|
715 NKern::EnableAllInterrupts(); |
|
716 iScheduler.WakeUp(); |
|
717 return; |
|
718 } |
|
719 } |
|
720 else |
|
721 { |
|
722 // no thread reschedule, so emit trace... |
|
723 #ifdef BTRACE_CPU_USAGE |
|
724 BTrace0(BTrace::ECpuUsage,BTrace::EIrqEnd); |
|
725 #endif |
|
726 } |
|
727 |
|
728 if (((NThread*)pC)->iInKernel == 0 && // thread is running in user mode |
|
729 pC->iUserModeCallbacks != NULL && // and has callbacks queued |
|
730 TheScheduler.iKernCSLocked == 0 && // and is not currently processing a diversion |
|
731 pC->IsSafeToPreempt()) // and can be safely preempted at this point |
|
732 { |
|
733 TheScheduler.iKernCSLocked = 1; |
|
734 pC->ApplyDiversion(); |
|
735 } |
|
736 NKern::EnableAllInterrupts(); |
|
737 __NK_ASSERT_ALWAYS(TInt(ResumeThread(pC->iWinThread)) > 0); // check thread was previously suspended |
|
738 } |
|
739 |
|
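
The cascade of tests at the top of End() is easier to audit when collapsed into one predicate. A condensed restatement (CanPreemptNow is a hypothetical helper, not emulator source), using the same member names as the code above:

    // Preempt the interrupted thread only if all four conditions hold.
    TBool Win32Interrupt::CanPreemptNow(NThread* aInterrupted) const
        {
        return iLock == 1                            // outermost interrupt level
            && TheScheduler.iKernCSLocked == 0       // kernel not locked
            && (TheScheduler.iRescheduleNeededFlag   // and there is work to do:
                || TheScheduler.iDfcPendingFlag)     // reschedule or pending DFCs
            && aInterrupted->IsSafeToPreempt();      // and preemption is safe here
        }
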
740 void Win32Interrupt::Reschedule(TAny*) |
|
741 // |
1011 // |
742 // The entry-point for the interrupt-rescheduler thread. |
1012 // The entry-point for the interrupt-rescheduler thread. |
743 // |
1013 // |
744 // This spends its whole life going around the TScheduler::Reschedule() loop |
1014 // This spends its whole life going around the TScheduler::Reschedule() loop |
745 // selecting another thread to run. |
1015 // selecting another thread to run. |
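
The body of SchedulerThreadFunction() falls outside this extract, but the comment pins down its job. Conceptually the loop has roughly this shape (a sketch under assumptions only: Wait() is a hypothetical counterpart to the WakeUp() call seen in ForceReschedule() above):

    // Conceptual shape only -- not the emulator's actual code.
    void Win32Interrupt::SchedulerThreadFunction(TAny* aPtr)
        {
        Win32Interrupt& self = *static_cast<Win32Interrupt*>(aPtr);
        for (;;)
            {
            self.iScheduler.Wait();           // hypothetical: sleep until woken
            TheScheduler.iKernCSLocked = 1;   // lock the kernel, as Restore() does
            TScheduler::Reschedule();         // select and resume another thread
            }
        }
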
906 return result ? result : GetModuleHandleA(aModuleName2); |
1184 return result ? result : GetModuleHandleA(aModuleName2); |
907 } |
1185 } |
908 |
1186 |
909 TWin32FunctionInfo Win32FindExportedFunction(const char* aFunctionName, ...) |
1187 TWin32FunctionInfo Win32FindExportedFunction(const char* aFunctionName, ...) |
910 { |
1188 { |
911 const char *libname; |
|
912 HMODULE library = NULL; |
|
913 |
|
914 va_list arg; |
1189 va_list arg; |
915 va_start(arg, aFunctionName); |
1190 va_start(arg, aFunctionName); |
|
1191 HMODULE library = NULL; |
|
1192 const char* libname; |
916 |
1193 |
917 // Loop through arguments until we find a library we can get a handle to. List of library names |
1194 // Loop through arguments until we find a library we can get a handle to. List of library names |
918 // is NULL-terminated. |
1195 // is NULL-terminated. |
919 while ((libname = va_arg(arg, const char *)) != NULL) |
1196 while ((libname = va_arg(arg, const char*)) != NULL) |
920 { |
1197 { |
921 library = GetModuleHandleA(libname); |
1198 library = GetModuleHandleA(libname); |
922 if (library != NULL) |
1199 if (library != NULL) |
923 break; |
1200 break; |
924 } |
1201 } |
925 |
|
926 va_end(arg); |
1202 va_end(arg); |
927 |
1203 |
928 // Make sure we did get a valid library |
1204 // Make sure we did get a valid library |
929 __NK_ASSERT_ALWAYS(library != NULL); |
1205 __NK_ASSERT_ALWAYS(library != NULL); |
930 |
1206 |
931 // Find the start address of the function |
1207 // Find the start address of the function |
932 TUint start = (TUint)GetProcAddress(library, aFunctionName); |
1208 TUint start = (TUint)GetProcAddress(library, aFunctionName); |
933 __NK_ASSERT_ALWAYS(start != 0); |
1209 __NK_ASSERT_ALWAYS(start != 0); |
934 |
1210 |
935 // Now have to check all other exports to find the end of the function |
1211 // Now have to check all other exports to find the end of the function |
936 TUint end = 0xffffffff; |
1212 TUint end = ~0u; |
937 TInt i = 1; |
1213 for (TInt i = 1; ; ++i) |
938 for (;;) |
|
939 { |
1214 { |
940 TUint addr = (TUint)GetProcAddress(library, MAKEINTRESOURCEA(i)); |
1215 TUint addr = (TUint)GetProcAddress(library, MAKEINTRESOURCEA(i)); |
941 if (!addr) |
1216 if (!addr) |
942 break; |
1217 break; |
943 if (addr > start && addr < end) |
1218 if (addr > start && addr < end) |
944 end = addr; |
1219 end = addr; |
945 ++i; |
1220 } |
946 } |
1221 __NK_ASSERT_ALWAYS(end != ~0u); |
947 __NK_ASSERT_ALWAYS(end != 0xffffffff); |
|
948 |
|
949 TWin32FunctionInfo result = { start, end - start }; |
1222 TWin32FunctionInfo result = { start, end - start }; |
950 |
1223 |
951 #ifdef DUMP_STACK_BACKTRACE |
1224 #ifdef DUMP_STACK_BACKTRACE |
952 DEBUGPRINT("Function %s found at %08x to %08x", aFunctionName, start, end); |
1225 DEBUGPRINT("Function %s found at %08x to %08x", aFunctionName, start, end); |
953 #endif |
1226 #endif |
954 |
1227 |
955 return result; |
1228 return result; |
956 } |
1229 } |
957 |
1230 |
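
Win32FindExportedFunction() exploits the fact that GetProcAddress() also accepts an export ordinal wrapped with MAKEINTRESOURCEA(), so walking ordinals 1, 2, 3, ... visits every export until the first gap; the lowest export address above the target's start then approximates where the target ends. A standalone illustration of the same scan (it assumes dense ordinals starting at 1, as the code above does; on newer Windows RaiseException actually lives in kernelbase.dll, hence the fallback):

    #include <windows.h>
    #include <stdio.h>

    int main()
        {
        HMODULE lib = GetModuleHandleA("kernelbase.dll");
        if (!lib)
            lib = GetModuleHandleA("kernel32.dll");     // older Windows
        UINT_PTR start = (UINT_PTR)GetProcAddress(lib, "RaiseException");
        UINT_PTR end = ~(UINT_PTR)0;

        // Scan exports by ordinal, keeping the lowest address above 'start'.
        for (int i = 1; ; ++i)
            {
            UINT_PTR addr = (UINT_PTR)GetProcAddress(lib, MAKEINTRESOURCEA(i));
            if (!addr)
                break;                                  // first gap: stop
            if (addr > start && addr < end)
                end = addr;
            }
        printf("RaiseException spans %p..%p\n", (void*)start, (void*)end);
        return 0;
        }
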
958 void Win32FindNonPreemptibleFunctions() |
1231 void Win32FindNonPreemptibleFunctions() |
959 { |
1232 { |
960 #ifdef DUMP_STACK_BACKTRACE |
1233 #ifdef DUMP_STACK_BACKTRACE |
961 PrintAllModuleInfo(); |
1234 PrintAllModuleInfo(); |
962 #endif |
1235 #endif |
963 |
1236 |
964 TUint i = 0; |
1237 TUint i = 0; |
965 Win32NonPreemptibleFunctions[i++] = Win32FindExportedFunction("RaiseException", "kernelbase.dll", "kernel32.dll", NULL); |
1238 Win32NonPreemptibleFunctions[i++] = Win32FindExportedFunction("RaiseException", "kernelbase.dll", "kernel32.dll", NULL); |
966 Win32NonPreemptibleFunctions[i++] = Win32FindExportedFunction("KiUserExceptionDispatcher", "ntdll.dll", NULL); |
1239 Win32NonPreemptibleFunctions[i++] = Win32FindExportedFunction("KiUserExceptionDispatcher", "ntdll.dll", NULL); |
967 __NK_ASSERT_ALWAYS(i == KWin32NonPreemptibleFunctionCount); |
1240 __NK_ASSERT_ALWAYS(i == KWin32NonPreemptibleFunctionCount); |
968 } |
1241 } |
969 |
1242 |
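
Recording each function as a start address plus length pays off in the stack walker below: membership of eip can be tested with a single unsigned comparison, because when eip is below iStartAddr the subtraction wraps to a huge value that can never be less than iLength. Spelled out as a helper (the walker below inlines the same expression):

    // One unsigned compare covers 'eip >= start && eip < start + length'.
    inline TBool InFunction(const TWin32FunctionInfo& aInfo, TUint aEip)
        {
        return TUint(aEip - aInfo.iStartAddr) < aInfo.iLength;
        }
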
970 TBool Win32IsThreadInNonPreemptibleFunction(HANDLE aWinThread, TLinAddr aStackTop) |
1243 TBool Win32IsThreadInNonPreemptibleFunction(HANDLE aWinThread, TLinAddr aStackTop) |
971 { |
1244 { |
972 const TInt KMaxSearchDepth = 16; // 12 max observed while handling exceptions |
1245 const TInt KMaxSearchDepth = 16; // 12 max observed while handling exceptions |
973 const TInt KMaxStackSize = 1024 * 1024; // Default reserved stack size on Windows |
1246 const TInt KMaxStackSize = 1024 * 1024; // Default reserved stack size on Windows |
974 const TInt KMaxFrameSize = 4096; |
1247 const TInt KMaxFrameSize = 4096; |
975 |
1248 |
976 CONTEXT c; |
1249 CONTEXT c; |
977 c.ContextFlags=CONTEXT_FULL; |
1250 c.ContextFlags = CONTEXT_CONTROL; |
978 GetThreadContext(aWinThread, &c); |
1251 CheckedGetThreadContext(aWinThread, &c); |
979 |
|
980 TUint eip = c.Eip; |
1252 TUint eip = c.Eip; |
981 TUint ebp = c.Ebp; |
1253 TUint ebp = c.Ebp; |
982 TUint lastEbp = c.Esp; |
1254 TUint lastEbp = c.Esp; |
983 |
1255 |
984 #ifdef DUMP_STACK_BACKTRACE |
1256 #ifdef DUMP_STACK_BACKTRACE |
985 DEBUGPRINT("Stack backtrace for thread %x", aWinThread); |
1257 DEBUGPRINT("Stack backtrace for thread %x", aWinThread); |
986 #endif |
1258 #endif |
987 |
1259 |
988 // Walk the call stack |
1260 // Walk the call stack |
989 for (TInt i = 0 ; i < KMaxSearchDepth ; ++i) |
1261 for (TInt i = 0 ; i < KMaxSearchDepth ; ++i) |
990 { |
1262 { |
991 #ifdef DUMP_STACK_BACKTRACE |
1263 #ifdef DUMP_STACK_BACKTRACE |
992 DEBUGPRINT(" %08x", eip); |
1264 DEBUGPRINT(" %08x", eip); |
993 #endif |
1265 #endif |
994 |
1266 |
995 for (TInt j = 0 ; j < KWin32NonPreemptibleFunctionCount ; ++j) |
1267 for (TInt j = 0 ; j < KWin32NonPreemptibleFunctionCount ; ++j) |
996 { |
1268 { |
997 const TWin32FunctionInfo& info = Win32NonPreemptibleFunctions[j]; |
1269 const TWin32FunctionInfo& info = Win32NonPreemptibleFunctions[j]; |
998 if (TUint(eip - info.iStartAddr) < info.iLength) |
1270 if (TUint(eip - info.iStartAddr) < info.iLength) |
999 { |
1271 { |
1000 __KTRACE_OPT(KSCHED, DEBUGPRINT("Thread is in non-preemptible function %d at frame %d: eip == %08x", j, i, eip)); |
1272 __KTRACE_OPT(KSCHED, DEBUGPRINT("Thread is in non-preemptible function %d at frame %d: eip == %08x", j, i, eip)); |
1001 return TRUE; |
1273 return TRUE; |
1002 } |
1274 } |
1003 } |
1275 } |
1004 |
1276 |
1005 // Check frame pointer is valid before dereferencing it |
1277 // Check frame pointer is valid before dereferencing it |
1006 if (TUint(aStackTop - ebp) > KMaxStackSize || TUint(ebp - lastEbp) > KMaxFrameSize || ebp & 3) |
1278 if (TUint(aStackTop - ebp) > KMaxStackSize || TUint(ebp - lastEbp) > KMaxFrameSize || ebp & 3) |
1007 break; |
1279 break; |
1008 |
1280 |
1009 TUint* frame = (TUint*)ebp; |
1281 TUint* frame = (TUint*)ebp; |
1010 lastEbp = ebp; |
1282 lastEbp = ebp; |
1011 ebp = frame[0]; |
1283 ebp = frame[0]; |
1012 eip = frame[1]; |
1284 eip = frame[1]; |
1013 } |
1285 } |
1014 |
1286 |
1015 return FALSE; |
1287 return FALSE; |
1016 } |
1288 } |
1017 |
1289 |
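
The walk above depends on the classic x86 frame discipline: each prologue pushes the caller's EBP, and the CALL that entered the function left the return address immediately above it, so frame[0] is the next EBP and frame[1] the next EIP. A struct overlay makes this explicit (a sketch; the code above indexes the raw words directly). Frame-pointer-omitting code breaks the chain, which is exactly why the walk is bounded by the depth limit and the three sanity checks:

    // Classic x86 stack frame, valid only while EBP chaining is in use:
    //   [ebp]   -> caller's saved EBP (the next frame)
    //   [ebp+4] -> return address in the caller
    struct TStackFrame
        {
        TStackFrame* iNext;    // saved EBP
        TUint iReturnAddr;     // saved EIP
        };

    // One step of the walk, restated:
    //   TStackFrame* frame = (TStackFrame*)ebp;
    //   ebp = (TUint)frame->iNext;
    //   eip = frame->iReturnAddr;
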
1018 TBool NThread::IsSafeToPreempt() |
1290 TBool NThread::IsSafeToPreempt() |
1019 { |
1291 { |
1020 return !Win32IsThreadInNonPreemptibleFunction(iWinThread, iUserStackBase); |
1292 return !Win32IsThreadInNonPreemptibleFunction(iWinThread, iUserStackBase); |
1021 } |
1293 } |
1022 |
1294 |
1023 void LeaveKernel() |
|
1024 { |
|
1025 TInt& k=CheckedCurrentThread().iInKernel; |
|
1026 __NK_ASSERT_DEBUG(k>0); |
|
1027 if (k==1) // just about to leave kernel |
|
1028 { |
|
1029 NThread& t = CheckedCurrentThread(); |
|
1030 __NK_ASSERT_ALWAYS(t.iCsCount==0); |
|
1031 __NK_ASSERT_ALWAYS(t.iHeldFastMutex==0); |
|
1032 __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked==0); |
|
1033 NKern::DisableAllInterrupts(); |
|
1034 t.CallUserModeCallbacks(); |
|
1035 NKern::EnableAllInterrupts(); |
|
1036 } |
|
1037 --k; |
|
1038 } |
|
1039 |
|
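
LeaveKernel() is half of a nesting pair: every entry to the kernel increments iInKernel, every exit decrements it, and the user-mode callbacks are drained only on the final exit, with interrupts disabled so the queue cannot change underneath. The matching entry side is not shown in this extract; its likely shape is trivial (a guess, not the emulator source):

    // Hypothetical counterpart to LeaveKernel(): only the depth count matters
    // on the way in -- callbacks are only ever run on the way out.
    void EnterKernel()
        {
        ++CheckedCurrentThread().iInKernel;
        }
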