// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\dfcs.cpp
// DFCs
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

// TDfc member data
#define __INCLUDE_TDFC_DEFINES__

#include "nk_priv.h"

extern "C" void send_self_resched_ipi();

/** Construct an IDFC

    @param aFunction = function to call
    @param aPtr = parameter to be passed to function
*/
EXPORT_C TDfc::TDfc(TDfcFn aFunction, TAny* aPtr)
    {
    iPtr = aPtr;
    iFn = aFunction;
    iTied = 0;
    iHType = EEventHandlerIDFC;
    i8888.iHState0 = 0;
    i8888.iHState1 = 0;
    i8888.iHState2 = 0;
    iTiedLink.iNext = 0;
    }
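
/* Usage sketch (illustrative, not part of the original file): a plain IDFC
   takes just a function and a cookie, and is queued with Add(), typically
   from an ISR. MyIsrWork, MyIsr and gDriver are hypothetical names.

    void MyIsrWork(TAny* aPtr);             // runs in IDFC context after the ISR
    TDfc IsrWorkIDfc(&MyIsrWork, &gDriver); // no queue or priority - it's an IDFC

    void MyIsr(TAny*)
        {
        IsrWorkIDfc.Add();                  // the only legal way to queue from an ISR
        }
*/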
|


/** Construct an IDFC tied to a thread or group

    @param aTied = pointer to thread or group to which IDFC should be tied
    @param aFunction = function to call
    @param aPtr = parameter to be passed to function

    @pre Call in thread context, interrupts enabled
*/
EXPORT_C TDfc::TDfc(NSchedulable* aTied, TDfcFn aFunction, TAny* aPtr)
    {
    iPtr = aPtr;
    iFn = aFunction;
    iTied = 0;
    iHType = EEventHandlerIDFC;
    i8888.iHState0 = 0;
    i8888.iHState1 = 0;
    i8888.iHState2 = 0;
    iTiedLink.iNext = 0;
    if (aTied)
        {
        SetTied(aTied);
        }
    }
|


/** Construct a DFC without specifying a DFC queue.
    The DFC queue must be set before the DFC may be queued.

    @param aFunction = function to call
    @param aPtr = parameter to be passed to function
    @param aPriority = priority of DFC within the queue (0 to 7, where 7 is highest)
*/
EXPORT_C TDfc::TDfc(TDfcFn aFunction, TAny* aPtr, TInt aPriority)
    {
    __NK_ASSERT_DEBUG((TUint)aPriority<(TUint)KNumDfcPriorities);
    iPtr = aPtr;
    iFn = aFunction;
    iTied = 0;
    iHType = TUint8(aPriority);
    i8888.iHState0 = 0;
    i8888.iHState1 = 0;
    i8888.iHState2 = 0;
    iTiedLink.iNext = 0;
    }
|


/** Construct a DFC specifying a DFC queue.

    @param aFunction = function to call
    @param aPtr = parameter to be passed to function
    @param aDfcQ = pointer to DFC queue which this DFC should use
    @param aPriority = priority of DFC within the queue (0 to 7, where 7 is highest)
*/
EXPORT_C TDfc::TDfc(TDfcFn aFunction, TAny* aPtr, TDfcQue* aDfcQ, TInt aPriority)
    {
    __NK_ASSERT_DEBUG((TUint)aPriority<(TUint)KNumDfcPriorities);
    iPtr = aPtr;
    iFn = aFunction;
    iDfcQ = aDfcQ;
    iHType = TUint8(aPriority);
    i8888.iHState0 = 0;
    i8888.iHState1 = 0;
    i8888.iHState2 = 0;
    iTiedLink.iNext = 0;
    }
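
/* Usage sketch (illustrative): a DFC bound to a queue at construction time can
   be queued straight away. gMyDfcQ is a hypothetical, already-initialised
   TDfcQue*; priority 3 and the other names are arbitrary.

    TDfc MyDfc(&MyDfcFn, &gMyData, gMyDfcQ, 3);
    MyDfc.Enque();      // MyDfcFn(&gMyData) will run in gMyDfcQ's thread
*/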
|


/** Tie an IDFC to a thread or group

    @param aTied = pointer to thread or group to which IDFC should be tied
    @return KErrNone if successful
    @return KErrDied if the thread has exited or the group has been destroyed.

    @pre Call in thread context, interrupts enabled
    @pre Must be an IDFC, not a DFC
    @pre IDFC must not be queued or running
    @pre IDFC must not already be tied
*/
EXPORT_C TInt TDfc::SetTied(NSchedulable* aTied)
    {
    __NK_ASSERT_ALWAYS(IsIDFC() && i8816.iHState16==0);
    __NK_ASSERT_ALWAYS(aTied && !iTied);
    NKern::Lock();
    TInt r = aTied->AddTiedEvent(this);
    __NK_ASSERT_ALWAYS(r==KErrNone || r==KErrDied);
    NKern::Unlock();
    return r;
    }
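
/* Usage sketch (illustrative): tying an IDFC to a thread or group guarantees
   the handler never runs after that object has gone. All names here are
   hypothetical.

    TDfc EventIDfc(&MyHandler, &gMyData);   // plain IDFC, not yet tied
    TInt r = EventIDfc.SetTied(aThread);    // aThread is an NSchedulable*
    if (r == KErrDied)
        {
        // thread has already exited - the IDFC was not tied, so don't queue it
        }
*/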
|


/** Destroy a DFC or IDFC

    @pre Call from thread context with interrupts and preemption enabled
    @pre Calling thread holds no fast mutex
    @pre Calling thread in critical section
*/
EXPORT_C TDfc::~TDfc()
    {
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"TDfc::~TDfc");
    NKern::Lock();
    NEventHandler::TiedLock.LockOnly();
    NSchedulable* tied = iTied;
    if (IsDFC() || (IsIDFC() && !tied))
        {
        Cancel();
        iHType = (TUint8)EEventHandlerDummy;
        }
    if (IsIDFC())
        {
        __NK_ASSERT_ALWAYS(tied!=0);
        tied->AcqSLock();
        if (iTiedLink.iNext)
            {
            iTiedLink.Deque();
            iTiedLink.iNext = 0;
            }
        tied->RelSLock();
        Cancel();
        iHType = (TUint8)EEventHandlerDummy;
        iTied = 0;
        }
    NEventHandler::TiedLock.UnlockOnly();
    NKern::Unlock();
    }
|


/** Construct a DFC queue
    Kern::DfcQInit() should be called on the new DFC queue before it can be used.
*/
EXPORT_C TDfcQue::TDfcQue()
    : iThread(NULL)
    {}
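
/* Usage sketch (illustrative): kernel-side creation of a DFC queue, assuming
   the usual Kern::DfcQInit(queue, thread priority, name) helper referred to in
   the comment above. The queue name and thread priority are arbitrary.

    _LIT(KMyDfcQName, "MyDfcQ");
    TDfcQue* q = new TDfcQue;
    __NK_ASSERT_ALWAYS(q && Kern::DfcQInit(q, 27, &KMyDfcQName) == KErrNone);
*/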
|



/** Queue an IDFC or a DFC from an ISR

    This function is the only way to queue an IDFC and is the only way to queue
    a DFC from an ISR. To queue a DFC from an IDFC or a thread, either Enque()
    or DoEnque() should be used.

    This function does nothing if the IDFC/DFC is already queued.

    @pre Call only from an ISR, an IDFC, or a thread with preemption disabled.
    @pre Do not call from a thread with preemption enabled.
    @return TRUE if the DFC was actually queued by this call
            FALSE if the DFC was already queued on entry, so this call did nothing

    @see TDfc::DoEnque()
    @see TDfc::Enque()
*/
EXPORT_C TBool TDfc::Add()
    {
    __ASSERT_DEBUG(NKern::CurrentContext()!=NKern::EThread || NKern::KernelLocked(), *(int*)0xdfcadd01=0);
    __ASSERT_DEBUG(IsIDFC() || (IsDFC() && iDfcQ), *(int*)0xdfcadd03=0);
//  __ASSERT_WITH_MESSAGE_DEBUG(NKern::CurrentContext()!=NKern::EThread || NKern::KernelLocked(), "Do not call from thread with preemption enabled", "TDfc::Add");
//  __ASSERT_WITH_MESSAGE_DEBUG(IsIDFC() || (IsDFC() && iDfcQ), "DFC queue not set", "TDfc::Add");
#ifdef __WINS__
    __NK_ASSERT_ALWAYS(Interrupt.InInterrupt() || NKern::KernelLocked());
#endif
    TInt irq = NKern::DisableAllInterrupts();
    TSubScheduler& ss = SubScheduler();
    TUint32 orig = 0xFF00;

    // Transition the state to 'on normal IDFC queue'
    // 0000->008n
    // 00Cn->00En
    // All other states unchanged
    // Return original state
    if (IsValid())  // don't add if tied and the tied thread/group is being/has been destroyed
        orig = AddStateChange();
    if (orig==0)
        {
        // wasn't already queued
        i8888.iHState0 = 0; // BeginTiedEvent() not done
        ss.iDfcs.Add(this);
        ss.iDfcPendingFlag = 1;
#ifdef _DEBUG
        TUint32 st8 = DFC_STATE(this) & 0xFF;
        if (st8 != (0x80|ss.iCpuNum))
            __crash();
#endif
        }
    NKern::RestoreInterrupts(irq);
    return (orig==0 || (orig&0xFFE0)==0x00C0);
    }
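
/* Usage sketch (illustrative): the return value distinguishes 'newly queued'
   from 'was already pending', which is occasionally useful for event
   accounting. MyDriver and its members are hypothetical.

    void MyIsr(TAny* aPtr)
        {
        MyDriver* d = static_cast<MyDriver*>(aPtr);
        if (d->iRxDfc.Add())
            ++d->iRxEvents;     // counts queuings, not interrupts
        }
*/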
|


/** Queue an IDFC or a DFC from any context

    This function is identical to TDfc::Add() but no checks are performed
    for correct usage, and it contains no instrumentation code.

    @return TRUE if the DFC was actually queued by this call
            FALSE if the DFC was already queued on entry, so this call did nothing

    @see TDfc::DoEnque()
    @see TDfc::Enque()
    @see TDfc::Add()
*/
EXPORT_C TBool TDfc::RawAdd()
    {
    TInt irq = NKern::DisableAllInterrupts();
    TSubScheduler& ss = SubScheduler();
    TUint32 orig = 0xFF00;
    if (IsValid())  // don't add if tied and the tied thread/group is being/has been destroyed
        orig = AddStateChange();
    if (orig==0)
        {
        // wasn't already queued
        i8888.iHState0 = 0; // BeginTiedEvent() not done
        ss.iDfcs.Add(this);
        ss.iDfcPendingFlag = 1;
        send_self_resched_ipi();    // ensure current CPU runs the DFC
#ifdef _DEBUG
        TUint32 st8 = DFC_STATE(this) & 0xFF;
        if (st8 != (0x80|ss.iCpuNum))
            __crash();
#endif
        // FIXME: Need to wait to ensure IRQ is active before reenabling interrupts
        }
    NKern::RestoreInterrupts(irq);
    return (orig==0 || (orig&0xFFE0)==0x00C0);
    }
|


/** Queue a DFC (not an IDFC) from an IDFC or a thread with preemption disabled.

    This function is the preferred way to queue a DFC from an IDFC. It should not
    be used to queue an IDFC - use TDfc::Add() for that.

    This function does nothing if the DFC is already queued.

    @pre Call only from an IDFC or a thread with preemption disabled.
    @pre Do not call from an ISR or a thread with preemption enabled.
    @return TRUE if the DFC was actually queued by this call
            FALSE if the DFC was already queued on entry, so this call did nothing

    @see TDfc::Add()
    @see TDfc::Enque()
*/
EXPORT_C TBool TDfc::DoEnque()
    {
    __ASSERT_WITH_MESSAGE_DEBUG((NKern::CurrentContext()==NKern::EIDFC) || (NKern::CurrentContext()==NKern::EThread && NKern::KernelLocked()), "Do not call from ISR or thread with preemption enabled", "TDfc::DoEnque");
    __NK_ASSERT_DEBUG(IsDFC());
    __ASSERT_WITH_MESSAGE_DEBUG(iDfcQ, "DFC queue not set", "TDfc::DoEnque");

    // Check not already queued, then mark as queued to prevent ISRs touching this DFC
    TDfcQue* q = iDfcQ;
    NThreadBase* t = q->iThread;
    t->AcqSLock();  // also protects the DFC queue
    TUint16 expect = 0;
    TBool ok = __e32_atomic_cas_acq16(&iDfcState, &expect, 1);
    if (ok)
        {
        // wasn't already queued; now marked as on the final queue, which means
        // attempts to cancel will block on the thread spin lock
        TUint present = q->iPresent[0];
        q->Add((TPriListLink*)this);
        if (!present)
            t->iWaitState.UnBlockT(NThreadBase::EWaitDfc, q, KErrNone);
        }
    t->RelSLock();  // also protects the DFC queue
    return ok;
    }
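
/* Usage sketch (illustrative): from inside an IDFC preemption is already
   disabled, so a follow-up DFC is queued with DoEnque() rather than Enque(),
   saving the NKern::Lock()/Unlock() pair. The names are hypothetical.

    void MyIDfcFn(TAny* aPtr)
        {
        MyDriver* d = static_cast<MyDriver*>(aPtr);
        d->iWorkerDfc.DoEnque();    // hand the heavy work to a kernel thread
        }
*/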
|

void TDfcQue::ThreadFunction(TAny* aDfcQ)
    {
    TDfcQue& q = *(TDfcQue*)aDfcQ;
    NThreadBase* t = NKern::CurrentThread();
    FOREVER
        {
        NKern::Lock();
        t->AcqSLock();  // also protects the DFC queue
        if (q.IsEmpty())
            {
            t->iWaitState.SetUpWait(NThreadBase::EWaitDfc, 0, &q);
            RescheduleNeeded();
            t->RelSLock();  // also protects the DFC queue
            NKern::Unlock();
            }
        else
            {
            TDfc* d = q.First();
            q.Remove((TPriListLink*)d);
            TDfcFn f = d->iFn;
            TAny* p = d->iPtr;
            d->ResetState();
            t->RelSLock();  // also protects the DFC queue
            NKern::Unlock();
            (*f)(p);
            }
        }
    }
|



void TCancelIPI::Send(TDfc* aDfc, TInt aCpu)
    {
    iDfc = aDfc;
    Queue(&Isr, 1u<<aCpu);
    }

void TCancelIPI::Isr(TGenericIPI* aIPI)
    {
    TCancelIPI* p = (TCancelIPI*)aIPI;
    TDfc* d = p->iDfc;
    if (d->iNext)
        {
        // QueueDfcs() hasn't dequeued it yet
        // just dequeue it here and reset the state - QueueDfcs() will never see it
        // Note that this means we have to release the tied thread/group if necessary
        // BeginTiedEvent() has occurred if iHState0 is set and it's actually an IDFC, not an NTimer
        NSchedulable* tied = (d->iHType==NEventHandler::EEventHandlerIDFC && d->i8888.iHState0) ? d->iTied : 0;
        d->Deque();
        d->ResetState();
        if (tied)
            tied->EndTiedEvent();
        }
    else
        {
        // QueueDfcs() has already dequeued it
        // state transition:
        // XXYY->XX00
        // XX00->0000
        // QueueDfcs() will take care of the tied thread/group
        d->CancelFinalStateChange();
        }
    }
|


/** Cancels an IDFC or DFC.

    This function does nothing if the IDFC or DFC is not queued.

    For any DFC or IDFC the following identity holds:
        Number of times Add() is called and returns TRUE
        + Number of times DoEnque() is called and returns TRUE
        + Number of times Enque() is called and returns TRUE
        + Number of times QueueOnIdle() is called and returns TRUE
        = Number of times Cancel() is called and returns TRUE
        + Number of times the DFC/IDFC function executes

    @pre IDFC or thread context. Do not call from ISRs.

    @pre If the DFC function accesses the DFC object itself, the user must ensure that
        Cancel() cannot be called while the DFC function is running.

    @return TRUE    if the DFC was actually dequeued by this call - i.e. an
                    instance of the DFC's execution has been prevented. It is
                    possible that a previous execution is still in progress.
            FALSE   if the DFC was not queued on entry to the call, or was in
                    the process of being executed or cancelled. In this case
                    it is possible that the DFC executes after this call
                    returns.

    @post In either case it is safe to delete the DFC object on return from
        this call, provided only that the DFC function does not refer to the
        DFC object itself.
*/
EXPORT_C TBool TDfc::Cancel()
    {
    enum TAction { EDeque=1, EReset=2, EIdleUnlock=4, ESendIPI=8, EWait=16 };

    CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_INTERRUPTS_ENABLED,"TDfc::Cancel");
    if (!iDfcState)
        return FALSE;
    TUint action = EIdleUnlock;
    TBool ret = FALSE;
    TInt cpu = -1;
    NSchedulable* tied = 0;
    TDfcQue* q = 0;
    NThreadBase* t = 0;
    NKern::Lock();
    TSubScheduler& ss0 = SubScheduler();
    if (IsDFC())
        q = iDfcQ, t = q->iThread, t->AcqSLock();
    TInt irq = NKern::DisableAllInterrupts();
    TheScheduler.iIdleSpinLock.LockOnly();

    // 0000->0000, XX00->ZZ00, xxYY->zzYY
    TUint state = CancelInitialStateChange();
    TUint stt = state >> 5;
    if (state & 0xFF00)
        {
        // someone else is cancelling at the same time - just wait for them to finish
        action = EWait|EIdleUnlock;
        goto end;
        }
    if (state == 0) // DFC not active
        goto end;

    // possible states here are 0001, 002g, 006m, 008m, 00Am, 00Cm, 00Em
    ret = (stt!=6); // if running but not pending, Cancel() will not have prevented an execution
    if (state == TUint(TheScheduler.iIdleGeneration | 0x20))
        {
        // was on the idle queue; BeginTiedEvent() isn't called until QueueDfcs() runs
        action = EDeque|EReset|EIdleUnlock;
        goto end;
        }
    if (state == 1)
        {
        // was on the final queue, so must be a DFC, not an IDFC
        q->Remove((TPriListLink*)this);
        action = EReset|EIdleUnlock;
        goto end;
        }

    // possible states here are 002g (spilled), 006m, 008m, 00Am, 00Cm, 00Em
    // i.e. either on the IDFC queue, on the ExIDFC queue, or running
    // For IDFCs, the tied thread/group is now in play.
    cpu = state & 0x1f; // CPU it's on for states 006m, 008m, 00Am, 00Cm, 00Em
    if (stt==3 || stt==6 || stt==7)
        {
        // It's actually running - must be an IDFC. A re-queue may also be pending.
        TheScheduler.iIdleSpinLock.UnlockOnly();
        TSubScheduler* ss = TheSubSchedulers + cpu;
        TDfc* expect = this;
        TBool done = __e32_atomic_cas_acq_ptr(&ss->iCurrentIDFC, &expect, 0);
        if (done)
            {
            // We cleared iCurrentIDFC so QueueDfcs() won't touch this again - we reset the state and finish up
            // We must also release the tied thread/group
            tied = iTied;
            action = EReset;
            goto end;
            }
        // QueueDfcs() got to iCurrentIDFC before we did, so we interlock with it
        // and we can leave the EndTiedEvent() to it as well
        // State transition:
        // XXAm->XX00, wait
        // XX00->0000, don't wait
        TUint32 orig = CancelFinalStateChange() & 0xFF;
        __NK_ASSERT_ALWAYS(orig==0 || orig==state);
        action = orig ? EWait : 0;
        goto end;
        }

    // possible states here are 002g (propagated), 008m, 00Am, so it's either on the endogenous or the exogenous IDFC queue
    if (stt==5)
        {
        // it's on the exogenous IDFC queue
        TheScheduler.iIdleSpinLock.UnlockOnly();
        TSubScheduler* ss = TheSubSchedulers + cpu;
        ss->iExIDfcLock.LockOnly();
        if (iNext)
            {
            // we got to it before QueueDfcs() on the other CPU, so we can finish up here
            // QueueDfcs() will never see it again, so we must release the tied thread/group
            Deque();
            tied = iTied;
            ss->iExIDfcLock.UnlockOnly();
            action = EReset;
            goto end;
            }
        // QueueDfcs() on the other CPU has already dequeued it - we must now interlock with RunIDFCStateChange()
        ss->iExIDfcLock.UnlockOnly();
        // State transition:
        // XXAm->XX00, wait
        // XX00->0000, don't wait
        // QueueDfcs() will take care of the tied thread/group
        TUint32 orig = CancelFinalStateChange() & 0xFF;
        __NK_ASSERT_ALWAYS(orig==0 || orig==state);
        action = orig ? EWait : 0;
        goto end;
        }

    // possible states here are 002g (propagated idle) or 008m (IDFC or DFC on the endogenous DFC queue)
    if (stt==1) // propagated idle
        cpu = TheScheduler.iIdleSpillCpu;

    // if it's on this CPU's IDFC queue we can just remove it and reset the state here;
    // otherwise we send a cancel IPI to the CPU it's on.
    // We are guaranteed to dequeue the DFC before it executes, since
    // QueueDfcs() on the target CPU will notice that a cancel is in progress and
    // so will not run the DFC even if it dequeues it.
    // QueueDfcs() takes care of the tied thread/group if it sees the DFC/IDFC again;
    // otherwise we must do it here.
    if (TUint(cpu) == ss0.iCpuNum)
        {
        if (IsIDFC())
            tied = iTied;
        action = EDeque|EReset|EIdleUnlock;
        }
    else
        action = EIdleUnlock|ESendIPI|EWait;

end:
    // Common exit point
    if (action & EDeque)
        Deque();
    if (action & EReset)
        {
        ResetState();
        }
    if (action & EIdleUnlock)
        TheScheduler.iIdleSpinLock.UnlockOnly();
    NKern::RestoreInterrupts(irq);
    if (t)
        t->RelSLock();

    // on another CPU's IDFC queue, so send an IPI to remove it
    if (action & ESendIPI)
        {
        TCancelIPI ipi;
        ipi.Send(this, cpu);
        ipi.WaitCompletion();
        tied = 0;
        }

    // wait for the cancel to complete
    if (action & EWait)
        {
        TUint n = 0x01000000;
        while ((iDfcState>>8) & ss0.iCpuMask)
            {
            __chill();
            if (!--n)
                __crash();
            }
        }

    // release the tied thread/group if we waited for the IDFC to complete
    if (tied)
        tied->EndTiedEvent();
    NKern::Unlock();
    return ret;
    }
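
/* Usage sketch (illustrative): TRUE from Cancel() means one pending execution
   was prevented; FALSE means there was nothing to prevent, or the handler is
   already running or being cancelled elsewhere. iRxDfc is hypothetical.

    iRxDfc.Cancel();    // safe even if iRxDfc was never queued
    // per the contract above, the TDfc may now be destroyed, provided its
    // function does not refer to the TDfc object itself
*/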
|


/** Queue a DFC (not an IDFC) from a thread.

    Does nothing if the DFC is already queued.

    NOTE: Although this can be called in an IDFC context, it is more efficient to call
    DoEnque() in that case.

    @pre Call either in a thread or an IDFC context.
    @pre Do not call from an ISR.
    @return TRUE if the DFC was actually queued by this call
            FALSE if the DFC was already queued on entry, so this call did nothing
*/
EXPORT_C TBool TDfc::Enque()
    {
    CHECK_PRECONDITIONS(MASK_NOT_ISR,"TDfc::Enque()");
    NKern::Lock();
    TBool ret = DoEnque();
    NKern::Unlock();
    return ret;
    }
|


/** Queue a DFC (not an IDFC) from a thread and also signal a fast mutex.

    The DFC is unaffected if it is already queued.

    The fast mutex is signalled before preemption is re-enabled, to avoid potential
    scheduler thrashing.

    @param aMutex = pointer to the fast mutex to be signalled;
                    NULL means the system lock mutex.
    @return TRUE if the DFC was actually queued by this call
            FALSE if the DFC was already queued on entry, so this call did nothing
    @pre Call in a thread context.
    @pre Kernel must be unlocked.
    @pre Do not call from an ISR.
    @pre Do not call from an IDFC.
*/
EXPORT_C TBool TDfc::Enque(NFastMutex* aMutex)
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"TDfc::Enque(NFastMutex* aMutex)");
    if (!aMutex)
        aMutex = &TheScheduler.iLock;
    NKern::Lock();
    TBool ret = DoEnque();
    aMutex->Signal();
    NKern::Unlock();
    return ret;
    }
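
/* Usage sketch (illustrative): queue work and release the system lock in one
   call, so the DFC thread is not woken only to block immediately on the lock
   the caller still holds. iWorkDfc is hypothetical.

    NKern::LockSystem();
    // ... publish state for the DFC function to consume ...
    iWorkDfc.Enque(NULL);   // NULL means signal the system lock
*/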
|


/** Returns a pointer to the thread on which a DFC runs

    @return If this is a DFC and the DFC queue has been set, a pointer to the
            thread which will run the DFC.
            NULL if this is an IDFC or the DFC queue has not been set.
*/
EXPORT_C NThreadBase* TDfc::Thread()
    {
    if (!IsDFC())
        return 0;
    return iDfcQ ? iDfcQ->iThread : 0;
    }
|


/******************************************************************************
 * Idle notification
 ******************************************************************************/

/** Register an IDFC or a DFC to be called when the system goes idle

    This function does nothing if the IDFC/DFC is already queued.

    @return TRUE if the DFC was actually queued by this call
            FALSE if the DFC was already queued on entry, so this call did nothing
*/
EXPORT_C TBool TDfc::QueueOnIdle()
    {
    TInt irq = TheScheduler.iIdleSpinLock.LockIrqSave();
    TUint32 orig = 0xFF00;

    // Transition the state to 'on normal idle queue'
    // 0000->002g
    // 00Cn->006n
    // All other states unchanged
    // Return original state
    if (IsValid())  // don't add if tied and the tied thread/group is being/has been destroyed
        orig = QueueOnIdleStateChange();
    if (orig==0)
        {
        i8888.iHState0 = 0; // BeginTiedEvent() not done
        TheScheduler.iIdleDfcs.Add(this);
        }

    TheScheduler.iIdleSpinLock.UnlockIrqRestore(irq);
    return (orig==0 || (orig&0xFFE0)==0x00C0);
    }
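
/* Usage sketch (illustrative): a power manager can arm a DFC or IDFC to fire
   the next time the system goes idle. iIdleEntryDfc is hypothetical.

    iIdleEntryDfc.QueueOnIdle();    // runs once, on the next transition to idle
*/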
|


/******************************************************************************
 * Scheduler IDFC/DFC Processing
 ******************************************************************************/

void TSubScheduler::QueueDfcs()
//
// Enter with interrupts off and kernel locked
// Leave with interrupts off and kernel locked
//
// In state descriptions:
// XX=8 bits, not all zero (bitmask representing cancelling CPUs)
// xx=8 bits (bitmask representing cancelling CPUs)
// YY=8 bits, not all zero
// ZZ=XX with an additional bit set corresponding to the current CPU
// zz=xx with an additional bit set corresponding to the current CPU
// n = current CPU number
// m = another CPU number
// g = idle generation number
    {
    __KTRACE_OPT(KSCHED2,DEBUGPRINT("^"));
    iInIDFC = TRUE;
    BTrace0(BTrace::ECpuUsage, BTrace::EIDFCStart);
    TDfc* d = 0;
    NSchedulable* tied = 0;
    FOREVER
        {
        NKern::DisableAllInterrupts();
        // remove from the pending queue with interrupts disabled
        d = (TDfc*)iDfcs.GetFirst();
        if (d)
            {
            d->iNext = 0;
#ifdef _DEBUG
            TUint32 st8 = DFC_STATE(d) & 0xFF;
            if (st8 != TUint(0x80|iCpuNum) && st8 != TUint(0x21^TheScheduler.iIdleGeneration))
                __crash();
#endif
            if (d->IsDFC()) // also true for a mutating NTimer
                {
                NKern::EnableAllInterrupts();
                TDfcQue* q = d->iDfcQ;
                NThreadBase* t = q->iThread;
                t->AcqSLock();  // also protects the DFC queue

                // transition to 'final queue' state
                // 002g->0001, ok=TRUE
                // 008n->0001, ok=TRUE
                // XXYY->XX00, ok=FALSE
                // XX00->0000, ok=FALSE
                // other starting states invalid
                TUint32 orig = d->MoveToFinalQStateChange() >> 5;
                if (orig==1 || orig==4)
                    {
                    // wasn't being cancelled; now marked as on the final queue, which means
                    // attempts to cancel will block on the thread spin lock
                    TUint present = q->iPresent[0];
                    q->Add((TPriListLink*)d);
                    if (!present)
                        t->iWaitState.UnBlockT(NThreadBase::EWaitDfc, q, KErrNone);
                    }
                t->RelSLock();  // also protects the DFC queue
                continue;
                }
            // endogenous IDFC - could be tied, in which case it may need to be punted over to another CPU
            // can't be a mutating NTimer, since that would have gone into the IsDFC() path
            tied = d->iTied;
            if (tied && !d->i8888.iHState0) // if tied and BeginTiedEvent() not already done
                {
                d->i8888.iHState0 = 1;  // flag that BeginTiedEvent() has been done
                TInt cpu = tied->BeginTiedEvent();
                if (TUint(cpu) != iCpuNum)
                    {
                    // punt it over to the other CPU
                    TBool kick = FALSE;
                    TSubScheduler* ss = TheSubSchedulers + cpu;
                    ss->iExIDfcLock.LockOnly();
                    // transition the state here to handle cancellation
                    // XXYY->XX00, ok=FALSE
                    // XX00->0000, ok=FALSE
                    // 008n->00Am, ok=TRUE
                    // 002g->00Am, ok=TRUE
                    // other starting states invalid
                    TUint32 orig = d->TransferIDFCStateChange(cpu) >> 5;
                    if (orig==1 || orig==4)
                        {
                        kick = !ss->iExIDfcPendingFlag;
                        ss->iExIDfcPendingFlag = TRUE;
                        ss->iExIDfcs.Add(d);
                        }
                    ss->iExIDfcLock.UnlockOnly();
                    if (kick)
                        send_resched_ipi(cpu);
                    NKern::EnableAllInterrupts();   // let interrupts in
                    if (orig >= 8)
                        tied->EndTiedEvent();   // IDFC cancelled, so release the tied thread/group
                    continue;
                    }
                }
            }
        else
            {
            if (!iExIDfcPendingFlag)
                break;
            iExIDfcLock.LockOnly();
            d = (TDfc*)iExIDfcs.GetFirst();
            if (!d)
                {
                iExIDfcPendingFlag = 0;
                iExIDfcLock.UnlockOnly();
                break;
                }
            d->iNext = 0;
            tied = d->iTied;
            __NK_ASSERT_ALWAYS(d->IsIDFC() && tied);    // only tied IDFCs should get here
#ifdef _DEBUG
            TUint32 st8 = DFC_STATE(d) & 0xFF;
            if (st8 != (0xA0|iCpuNum))
                __crash();
#endif
            iExIDfcLock.UnlockOnly();
            }

        // endogenous or exogenous IDFC
        // if tied, we are on the correct CPU
        TDfcFn f = d->iFn;
        TAny* p = d->iPtr;

        // If Cancel() finds the IDFC in the running state (00Cn or 00En) it will do the following:
        // atomic { if (iCurrentIDFC==d) iCurrentIDFC=0; }
        // We must guarantee that the following store is observed before the state change in RunIDFCStateChange().
        // We assume the latter has full barrier semantics to guarantee this.
        iCurrentIDFC = d;

        // transition to running state
        // 002g->00Cn, ok=TRUE
        // 008n->00Cn, ok=TRUE
        // 00An->00Cn, ok=TRUE
        // XXYY->XX00, ok=FALSE
        // XX00->0000, ok=FALSE
        // other starting states invalid
        TUint32 orig = d->RunIDFCStateChange() >> 5;
        NKern::EnableAllInterrupts();
        if (orig==1 || orig==4 || orig==5)
            {
            (*f)(p);

            // transition to idle state, or rerun if necessary
            // first swap iCurrentIDFC with 0 - if the original value != d, don't touch d again, return 0xFFFFFFFF
            // 00Cn->0000
            // 00En->008n
            // 006n->006n
            // XXCn->XX00
            // XXEn->XX00
            // XX6n->XX00
            // other starting states invalid
            // return original state
            NKern::DisableAllInterrupts();
            TUint32 orig2 = d->EndIDFCStateChange(this);    // renamed from 'orig' to avoid shadowing the outer variable
            if ((orig2>>5)==7)
                {
                iDfcs.Add(d);
#ifdef _DEBUG
                TUint32 st8 = DFC_STATE(d) & 0xFF;
                if (st8 != (0x80|iCpuNum))
                    __crash();
#endif
                continue;
                }
            else if ((orig2>>5)==3)
                {
                TheScheduler.iIdleSpinLock.LockOnly();
                // 006n->002g
                // XX6n->XX00
                orig2 = d->EndIDFCStateChange2();
                if ((orig2>>5)==3)
                    TheScheduler.iIdleDfcs.Add(d);
                TheScheduler.iIdleSpinLock.UnlockOnly();
                }
            NKern::EnableAllInterrupts();
            if (tied && orig2<0x10000)
                tied->EndTiedEvent();   // if we set iCurrentIDFC back to 0, we release the tied thread/group
            }
        else
            {
            iCurrentIDFC = 0;
            if (tied)
                tied->EndTiedEvent();   // IDFC cancelled, so release the tied thread/group
            }
        }
    iDfcPendingFlag = 0;
    BTrace0(BTrace::ECpuUsage, BTrace::EIDFCEnd);
    iInIDFC = 0;
    __KTRACE_OPT(KSCHED2,DEBUGPRINT("~"));
    }
|


/******************************************************************************
 * Kernel-side asynchronous request DFCs
 ******************************************************************************/

EXPORT_C TAsyncRequest::TAsyncRequest(TDfcFn aFunction, TDfcQue* aDfcQ, TInt aPriority)
    : TDfc(aFunction, this, aDfcQ, aPriority), iCompletionObject(0), iCancel(0), iResult(0)
    {
    }


EXPORT_C void TAsyncRequest::Send(TDfc* aCompletionDfc)
    {
    __NK_ASSERT_DEBUG(!iCompletionObject);
    iCancel = EFalse;
    iCompletionObject = (TAny*)((TLinAddr)aCompletionDfc|1);
    TDfc::Enque();
    }


EXPORT_C void TAsyncRequest::Send(NFastSemaphore* aCompletionSemaphore)
    {
    __NK_ASSERT_DEBUG(!iCompletionObject);
    iCancel = EFalse;
    iCompletionObject = aCompletionSemaphore;
    TDfc::Enque();
    }


EXPORT_C TInt TAsyncRequest::SendReceive()
    {
    NFastSemaphore signal;
    NKern::FSSetOwner(&signal, 0);
    Send(&signal);
    NKern::FSWait(&signal);
    return iResult;
    }
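
/* Usage sketch (illustrative): SendReceive() above shows the calling side -
   queue the request, block on a fast semaphore, return iResult. A matching
   service function, run by the request's DFC queue, might look like this;
   ServiceFn and DoTheWork() are hypothetical.

    void ServiceFn(TAny* aPtr)
        {
        TAsyncRequest* req = (TAsyncRequest*)aPtr;  // aPtr is 'this', see the ctor
        req->Complete(req->iCancel ? KErrCancel : DoTheWork());
        }
*/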
|


EXPORT_C void TAsyncRequest::Cancel()
    {
    iCancel = ETrue;
    if (TDfc::Cancel())
        Complete(KErrCancel);
    }


EXPORT_C void TAsyncRequest::Complete(TInt aResult)
    {
    TLinAddr signal = (TLinAddr)__e32_atomic_swp_ord_ptr(&iCompletionObject, 0);
    if (signal)
        {
        iResult = aResult;
        if (signal&1)
            ((TDfc*)(signal&~1))->Enque();
        else
            NKern::FSSignal((NFastSemaphore*)signal);
        }
    }