// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\dfcs.cpp
// DFCs
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

// TDfc member data
#define __INCLUDE_TDFC_DEFINES__

#include "nk_priv.h"


/** Construct an IDFC

@param aFunction = function to call
@param aPtr = parameter to be passed to function
*/
EXPORT_C TDfc::TDfc(TDfcFn aFunction, TAny* aPtr)
	: iPtr(aPtr), iFunction(aFunction), iDfcQ(NULL)
	{
	iPriority=0xff;
	iSpare1=0;
	iOnFinalQ=FALSE;
	iQueued=FALSE;
	}


/** Construct a DFC without specifying a DFC queue.
The DFC queue must be set before the DFC may be queued.

@param aFunction = function to call
@param aPtr = parameter to be passed to function
@param aPriority = priority of DFC within the queue (0 to 7, where 7 is highest)
*/
EXPORT_C TDfc::TDfc(TDfcFn aFunction, TAny* aPtr, TInt aPriority)
	: iPtr(aPtr), iFunction(aFunction), iDfcQ(NULL)
	{
	__NK_ASSERT_DEBUG((TUint)aPriority<(TUint)KNumDfcPriorities);
	iPriority=TUint8(aPriority);
	iSpare1=0;
	iOnFinalQ=FALSE;
	iQueued=FALSE;
	}


/** Construct a DFC specifying a DFC queue.

@param aFunction = function to call
@param aPtr = parameter to be passed to function
@param aDfcQ = pointer to DFC queue which this DFC should use
@param aPriority = priority of DFC within the queue (0 to 7, where 7 is highest)
*/
EXPORT_C TDfc::TDfc(TDfcFn aFunction, TAny* aPtr, TDfcQue* aDfcQ, TInt aPriority)
	: iPtr(aPtr), iFunction(aFunction), iDfcQ(aDfcQ)
	{
	__NK_ASSERT_DEBUG((TUint)aPriority<(TUint)KNumDfcPriorities);
	iPriority=TUint8(aPriority);
	iSpare1=0;
	iOnFinalQ=FALSE;
	iQueued=FALSE;
	}
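
/* Usage sketch (illustrative comment only, not part of this file's build):
   binding a DFC to its callback, context pointer and queue at construction
   time. DExampleDriver, ExampleDfcFn and aQ are hypothetical names.

	void ExampleDfcFn(TAny* aPtr);					// receives the TAny* given to the constructor
	DExampleDriver::DExampleDriver(TDfcQue* aQ)		// aQ assumed to be an already-running queue
		: iDfc(ExampleDfcFn, this, aQ, 3)			// priority 3 within the queue (0 to 7)
		{
		}

   Alternatively the constructors that omit the queue can be used and the queue
   supplied later, e.g. via TDfc::SetDfcQ(), before the DFC is queued.
*/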


/** Construct a DFC queue
Kern::DfcQInit() should be called on the new DFC queue before it can be used.
*/
EXPORT_C TDfcQue::TDfcQue()
	: iThread(NULL)
	{}
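
/* Usage sketch (illustrative comment only): creating a DFC queue and starting
   its thread with Kern::DfcQInit(), as noted above. The queue name, the thread
   priority value (27) and the error handling are illustrative assumptions.

	_LIT(KExampleDfcQName, "ExampleDfcQ");
	TDfcQue* exampleQ = new TDfcQue;
	if (exampleQ)
		{
		TInt r = Kern::DfcQInit(exampleQ, 27, &KExampleDfcQName);	// create and start the queue's thread
		if (r != KErrNone)
			{
			delete exampleQ;		// thread never started, so plain delete is enough
			exampleQ = NULL;
			}
		}
*/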


#ifndef __DFC_MACHINE_CODED__

/** Queue an IDFC or a DFC from an ISR

This function is the only way to queue an IDFC and is the only way to queue
a DFC from an ISR. To queue a DFC from an IDFC or a thread either Enque()
or DoEnque() should be used.

This function does nothing if the IDFC/DFC is already queued.

@pre Call only from ISR, IDFC or thread with preemption disabled.
@pre Do not call from thread with preemption enabled.
@return TRUE if DFC was actually queued by this call
        FALSE if DFC was already queued on entry so this call did nothing
@see TDfc::DoEnque()
@see TDfc::Enque()
*/
EXPORT_C TBool TDfc::Add()
	{
	__ASSERT_WITH_MESSAGE_DEBUG(NKern::CurrentContext()!=NKern::EThread || TheScheduler.iKernCSLocked, "Do not call from thread with preemption enabled", "TDfc::Add");
	__ASSERT_WITH_MESSAGE_DEBUG(IsIDFC() || iDfcQ != NULL, "DFC queue not set", "TDfc::Add");
#ifdef __WINS__
	__NK_ASSERT_ALWAYS(Interrupt.InInterrupt() || TheScheduler.iKernCSLocked);
#endif
	return RawAdd();
	}
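
/* Usage sketch (illustrative comment only): an ISR defers the bulk of its work
   to a DFC by calling Add(). ExampleIsr, DExampleDriver and its iDfc/iIntId
   members, and the use of the Interrupt API here are hypothetical.

	void ExampleIsr(TAny* aPtr)
		{
		DExampleDriver* d = (DExampleDriver*)aPtr;
		Interrupt::Disable(d->iIntId);		// quiesce the source; the DFC re-enables it later
		d->iDfc.Add();						// remaining work runs in the DFC queue's thread
		}
*/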


/** Queue an IDFC or a DFC from an ISR

This function is identical to TDfc::Add() but no checks are performed for correct usage,
and it contains no instrumentation code.

@return TRUE if DFC was actually queued by this call
        FALSE if DFC was already queued on entry so this call did nothing
@see TDfc::DoEnque()
@see TDfc::Enque()
@see TDfc::Add()
*/
EXPORT_C TBool TDfc::RawAdd()
	{
	TInt irq=NKern::DisableAllInterrupts();

	// make sure DFC not already queued
	TBool ok = !TestAndSetQueued();
	if (ok)
		{
		TheScheduler.iDfcs.Add(this);
		TheScheduler.iDfcPendingFlag=1;
		}

	NKern::RestoreInterrupts(irq);
	return ok;
	}


/** Queue a DFC (not an IDFC) from an IDFC or thread with preemption disabled.

This function is the preferred way to queue a DFC from an IDFC. It should not
be used to queue an IDFC - use TDfc::Add() for this.

This function does nothing if the DFC is already queued.

@return TRUE if DFC was actually queued by this call
        FALSE if DFC was already queued on entry so this call did nothing
@pre Call only from IDFC or thread with preemption disabled.
@pre Do not call from ISR or thread with preemption enabled.

@see TDfc::Add()
@see TDfc::Enque()
*/
EXPORT_C TBool TDfc::DoEnque()
	{
	__ASSERT_WITH_MESSAGE_DEBUG((NKern::CurrentContext()==NKern::EIDFC) || (NKern::CurrentContext()==NKern::EThread && TheScheduler.iKernCSLocked), "Do not call from ISR or thread with preemption enabled", "TDfc::DoEnque");
	__NK_ASSERT_DEBUG(!IsIDFC());
	__ASSERT_WITH_MESSAGE_DEBUG(iDfcQ, "DFC queue not set", "TDfc::DoEnque");

	// Check not already queued and then mark queued to prevent ISRs touching this DFC
	TBool ok = !TestAndSetQueued();
	if (ok)
		DoEnqueFinal();
	return ok;
	}

void TDfc::DoEnqueFinal()
//
// Add a DFC to its final queue. Assumes DFC not currently queued.
// Enter and return with kernel locked.
//
	{
	iOnFinalQ=TRUE;
	iDfcQ->Add(this);
	NThreadBase* pT=iDfcQ->iThread;
	if (pT->iNState==NThreadBase::EWaitDfc)
		pT->CheckSuspendThenReady();
	}

void TDfcQue::ThreadFunction(TAny* aDfcQ)
	{
	TDfcQue& q=*(TDfcQue*)aDfcQ;
	NThreadBase* pC=TheScheduler.iCurrentThread;
	FOREVER
		{
		NKern::Lock();
		if (q.IsEmpty())
			{
			pC->iNState=NThreadBase::EWaitDfc;
			TheScheduler.Remove(pC);
			RescheduleNeeded();
			NKern::Unlock();
			}
		else
			{
			TDfc& d=*q.First();
			q.Remove(&d);
			d.iOnFinalQ=FALSE;
			d.iQueued=FALSE;
			NKern::Unlock();
			(*d.iFunction)(d.iPtr);
			}
		}
	}


/** Cancels an IDFC or DFC.

This function does nothing if the IDFC or DFC is not queued.

@return TRUE if the DFC was actually dequeued by this call. In that case
             it is guaranteed that the DFC will not execute until it is
             queued again.
        FALSE if the DFC was not queued on entry to the call, or was in
             the process of being executed or cancelled. In this case
             it is possible that the DFC executes after this call returns.

@post In either case it is safe to delete the DFC object on return from this
      call, provided that the DFC function does not refer to the DFC object
      itself.

@pre IDFC or thread context. Do not call from ISRs.

@pre If the DFC function accesses the DFC object itself, the user must ensure that
     Cancel() cannot be called while the DFC function is running.
*/
EXPORT_C TBool TDfc::Cancel()
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR,"TDfc::Cancel");
	NKern::Lock();
	TBool ret = iQueued;
	if (iQueued)	// ISRs can't affect this test since they can't de-queue a DFC or IDFC
		{
		if (!iOnFinalQ)	// OK to check this with interrupts enabled since interrupts can't change it
			{
			// Must disable interrupts to protect the pending queue
			TInt irq=NKern::DisableAllInterrupts();
			SDblQueLink::Deque();
			NKern::RestoreInterrupts(irq);
			}
		else
			{
			// Final queues can't be modified by interrupts
			iDfcQ->Remove(this);
			iOnFinalQ=FALSE;
			}
		iQueued=FALSE;	// must be done last
		}
	NKern::Unlock();
	return ret;
	}
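
/* Usage sketch (illustrative comment only): cancelling a DFC before destroying
   the object that owns it, relying on the guarantees documented above.
   ShutdownExample, DExampleDriver and its iDfc member are hypothetical; the
   caller is assumed to have already excluded any invocation of the DFC
   function that is still in progress.

	void ShutdownExample(DExampleDriver* aDriver)
		{
		aDriver->iDfc.Cancel();	// after this the DFC will not be dequeued and run again
		delete aDriver;			// safe provided the DFC function is not currently running
		}
*/
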
#endif

/** Queues a DFC (not an IDFC) from a thread.

Does nothing if DFC is already queued.

NOTE: Although this can be called in an IDFC context, it is more efficient to call
DoEnque() in this case.

@return TRUE if DFC was actually queued by this call
        FALSE if DFC was already queued on entry so this call did nothing
@pre Call either in a thread or an IDFC context.
@pre Do not call from an ISR.
*/
EXPORT_C TBool TDfc::Enque()
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR,"TDfc::Enque()");
	NKern::Lock();
	TBool ret = DoEnque();
	NKern::Unlock();
	return ret;
	}


/** Queues a DFC (not an IDFC) from a thread and also signals a fast mutex.

The DFC is unaffected if it is already queued.

The fast mutex is signalled before preemption is re-enabled to avoid potential
scheduler thrashing.

@param aMutex = pointer to fast mutex to be signalled;
                NULL means system lock mutex.
@return TRUE if DFC was actually queued by this call
        FALSE if DFC was already queued on entry so this call did nothing
@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre Do not call from an ISR.
@pre Do not call from an IDFC.
*/
EXPORT_C TBool TDfc::Enque(NFastMutex* aMutex)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"TDfc::Enque(NFastMutex* aMutex)");
	if (!aMutex)
		aMutex=&TheScheduler.iLock;
	NKern::Lock();
	TBool ret = DoEnque();
	aMutex->Signal();
	NKern::Unlock();
	return ret;
	}
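
/* Usage sketch (illustrative comment only): queueing a DFC while holding the
   system lock and releasing the lock as part of the same call, so that the
   signal happens before preemption is re-enabled, as described above.
   ExampleQueueUnderSystemLock is a hypothetical helper.

	void ExampleQueueUnderSystemLock(TDfc& aDfc)
		{
		NKern::LockSystem();	// examine state protected by the system lock...
		// ...decide that the DFC needs to run...
		aDfc.Enque(NULL);		// queue the DFC and signal the system lock in one operation
		}
*/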


/** Returns a pointer to the thread on which a DFC runs

@return If this is a DFC and the DFC queue has been set, a pointer to the
        thread which will run the DFC.
        NULL if this is an IDFC or the DFC queue has not been set.
*/
EXPORT_C NThreadBase* TDfc::Thread()
	{
	if (IsIDFC())
		return 0;
	return iDfcQ ? iDfcQ->iThread : 0;
	}


/******************************************************************************
 * Idle notification
 ******************************************************************************/

/** Register an IDFC or a DFC to be called when the system goes idle

This function does nothing if the IDFC/DFC is already queued.

@return TRUE if DFC was actually queued by this call
        FALSE if DFC was already queued on entry so this call did nothing
*/
EXPORT_C TBool TDfc::QueueOnIdle()
	{
	TInt irq=NKern::DisableAllInterrupts();

	// make sure DFC not already queued
	TBool ok = !TestAndSetQueued();
	if (ok)
		TheScheduler.iIdleDfcs.Add(this);

	NKern::RestoreInterrupts(irq);
	return ok;
	}
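
/* Usage sketch (illustrative comment only): requesting a callback the next time
   the system goes idle, e.g. to defer housekeeping until nothing else is
   runnable. DExamplePowerHandler and iIdleDfc are hypothetical; iIdleDfc's DFC
   function would perform the idle-time work.

	void DExamplePowerHandler::NotifyOnIdle()
		{
		iIdleDfc.QueueOnIdle();		// no effect if already queued
		}
*/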


TUint32 NKern::IdleGenerationCount()
	{
	return TheScheduler.iIdleGenerationCount;
	}


void NKern::Idle()
	{
	TInt irq = NKern::DisableAllInterrupts();
#ifdef _DEBUG
	if (!TheScheduler.iIdleDfcs.IsEmpty() && TheScheduler.iDelayedQ.IsEmpty())
#else
	if (!TheScheduler.iIdleDfcs.IsEmpty())
#endif
		{
		++TheScheduler.iIdleGenerationCount;
		TheScheduler.iDfcs.MoveFrom(&TheScheduler.iIdleDfcs);
		TheScheduler.iDfcPendingFlag=1;
		NKern::RestoreInterrupts(irq);
		return;
		}
	NKern::RestoreInterrupts(irq);
	NKIdle(0);
	}


/******************************************************************************
 * Scheduler IDFC/DFC Processing
 ******************************************************************************/

#ifndef __SCHEDULER_MACHINE_CODED__
void TScheduler::QueueDfcs()
//
// Enter with interrupts off and kernel locked
// Leave with interrupts off and kernel locked
//
	{
	iInIDFC = TRUE;
	BTrace0(BTrace::ECpuUsage,BTrace::EIDFCStart);
	FOREVER
		{
		// remove from pending queue with interrupts disabled
		TDfc* d=(TDfc*)iDfcs.GetFirst();
		if (!d)
			break;
		NKern::EnableAllInterrupts();
		if (d->IsIDFC())
			{
			d->iQueued=FALSE;
			(*d->iFunction)(d->iPtr);
			}
		else
			d->DoEnqueFinal();
		NKern::DisableAllInterrupts();
		}
	iDfcPendingFlag = FALSE;
	BTrace0(BTrace::ECpuUsage,BTrace::EIDFCEnd);
	iInIDFC = FALSE;
	}
#endif


/******************************************************************************
 * Kernel-side asynchronous request DFCs
 ******************************************************************************/

EXPORT_C TAsyncRequest::TAsyncRequest(TDfcFn aFunction, TDfcQue* aDfcQ, TInt aPriority)
	: TDfc(aFunction, this, aDfcQ, aPriority), iCompletionObject(0), iCancel(0), iResult(0)
	{
	}


EXPORT_C void TAsyncRequest::Send(TDfc* aCompletionDfc)
	{
	__NK_ASSERT_DEBUG(!iCompletionObject);
	iCancel = EFalse;
	iCompletionObject = (TAny*)((TLinAddr)aCompletionDfc|1);
	TDfc::Enque();
	}


EXPORT_C void TAsyncRequest::Send(NFastSemaphore* aCompletionSemaphore)
	{
	__NK_ASSERT_DEBUG(!iCompletionObject);
	iCancel = EFalse;
	iCompletionObject = aCompletionSemaphore;
	TDfc::Enque();
	}


EXPORT_C TInt TAsyncRequest::SendReceive()
	{
	NFastSemaphore signal;
	NKern::FSSetOwner(&signal, 0);
	Send(&signal);
	NKern::FSWait(&signal);
	return iResult;
	}
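
/* Usage sketch (illustrative comment only): running a request on a DFC queue
   and blocking the calling thread until it completes. ExampleRequestFn and
   ExampleCall are hypothetical; the constructor passes 'this' as the DFC
   argument, so the DFC function receives the TAsyncRequest it belongs to.

	void ExampleRequestFn(TAny* aPtr)
		{
		TAsyncRequest* req = (TAsyncRequest*)aPtr;
		TInt r = KErrNone;
		// ... perform the work on the DFC queue's thread ...
		req->Complete(r);						// releases the waiting SendReceive() caller
		}

	TInt ExampleCall(TDfcQue* aQ)
		{
		TAsyncRequest req(ExampleRequestFn, aQ, 0);	// DFC priority 0 within aQ
		return req.SendReceive();					// queue the DFC and wait for Complete()
		}
*/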


EXPORT_C void TAsyncRequest::Cancel()
	{
	iCancel = ETrue;
	if(TDfc::Cancel())
		Complete(KErrCancel);
	}


EXPORT_C void TAsyncRequest::Complete(TInt aResult)
	{
	TLinAddr signal = (TLinAddr)__e32_atomic_swp_ord_ptr(&iCompletionObject, 0);
	if(signal)
		{
		iResult = aResult;
		if(signal&1)
			((TDfc*)(signal&~1))->Enque();
		else
			NKern::FSSignal((NFastSemaphore*)signal);
		}
	}