// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <kernel/emi.h>
#include <kernel/kern_priv.h>

#ifdef __EMI_SUPPORT__
#ifdef __SMP__
#error EMI incompatible with SMP
#endif

// For use with DThread::iEmiFlags
// const TUint8 KThdCreated = 0;
const TUint8 KThdStarting = 1;
const TUint8 KThdRunning = 2;
const TUint8 KThdExiting = 3;
const TUint8 KThdEMIStateMask = 3;
const TUint8 KThdUseOldExit = 4;
const TUint8 KThdReStart = 8;

// Static initialisation
TThreadExitMonitor EMI::ExitMonitor = NULL;
TThreadExitMonitor EMI::OldExitMonitor = NULL;
TThreadStartMonitor EMI::StartMonitor = NULL;
TInt EMI::Generation = 0;
NFastSemaphore* EMI::ExitSemaphore = NULL;
NTimer* EMI::AfterIdleTimer = NULL;
EMI::TAfterIdleState EMI::AfterIdleState = ENone;
TInt EMI::AfterIdleDelay = 0;


/**
@internalComponent
*/
TInt EMI::Init()
	{
	K::TheNullThread->iEmiFlags = KThdRunning;

	ExitSemaphore = new NFastSemaphore();
	if (!ExitSemaphore)
		return KErrNoMemory;

	TheScheduler.iSigma = new NThread();
	return (TheScheduler.iSigma == NULL) ? KErrNoMemory : KErrNone;
	}

//EXPORT_C TInt EMI::TaskEventLogging(TBool aLogging, TInt aLogSize, TThreadStartMonitor aStartMonitor, TThreadExitMonitor aExitMonitor)
/**
Starts or stops the scheduling event logging system.

The function can be called repeatedly to change the configuration of the scheduling event
logging system at any time.

It takes pointers to two functions, known as the 'start monitor' and
the 'exit monitor'.

The 'start monitor' is called when a Symbian OS thread (a DThread) starts
executing, i.e. after the thread is initially resumed. It should be used
to initialise any data associated with the thread, and to set the thread
as loggable, through a call to SetThreadLoggable(), if scheduling event logging is required.

The 'exit monitor' is called when, and if, a Symbian OS thread (a DThread) exits.
It should be used to free any resources associated with the thread.

The initial state of the 'start monitor' and the 'exit monitor' is NULL.
Changing from this joint initial state, by setting one or both, causes all threads
to be initialised to a non-loggable state.

TaskEventLogging() should be protected from possible parallel
invocation, as it does not act atomically.

@param aLogging

If true, scheduling event logging is enabled. If false, scheduling event logging is disabled,
but non-scheduling events can still be logged. The initial state is false.


@param aLogSize

The number of entries in the internal event log. If the event buffer
fills up, the oldest events are deleted to make way for new events, and the
"event lost" flag is set on the event following the deleted event. A
value of 0 indicates no event log, and no event logging is possible in this
state. If an event log already exists, and this value is a different
size, then all events in the old log will be lost.

Changing the event size may result in a memory allocation. On failure,
this function returns KErrNoMemory, with logging turned off, and no event
buffer. Monitors are left as they were before the call.


@param aStartMonitor

A pointer to the function to be called when a new DThread is started. A value of
NULL means that no 'start monitor' function will be called.

Once this function has been set, it is called for each DThread that
already exists. This includes the Idle thread, the Supervisor thread,
and DFC threads. The function is also called for the Sigma thread, which
accounts for time spent in non-loggable scheduled entities. Such entities
would include DThreads that are not fully initialised, or are being
deleted.

On return, the 'start monitor' function should return 0 for success or
a negative value to indicate failure. Note that a failure here may result
in the creation of the thread failing. Threads that are already running remain running.

@param aExitMonitor

A pointer to the function to be called when (and if) a DThread
exits. A value of NULL means that no 'exit monitor' function will be called.

Before the exit monitor is called, the monitoring of the thread is
disabled, i.e. it is set to non-loggable, and all events for the thread are deleted
from the event log. In reality, the deleted event records are altered so that
they no longer make any reference to the deleted thread,
but instead refer to the Sigma thread. (This gives rise to the possibility of
seeing the Sigma thread switching to the Sigma thread.) Note that a
non-visible thread will still trigger this routine.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Can be used in a device driver.

@return KErrNone on success; KErrNoMemory on allocation failure;
KErrNotSupported if EMI is not compiled into the kernel.
*/
EXPORT_C TInt EMI::TaskEventLogging(TBool aLogging, TInt aLogSize, TThreadStartMonitor aStartMonitor, TThreadExitMonitor aExitMonitor)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"EMI::TaskEventLogging");
	TInt error = TaskEventLogging(aLogging, aLogSize);
	if (error == KErrNone)
		ThreadMonitors(aStartMonitor, aExitMonitor);
	return error;
	}
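
// Illustrative usage sketch only, not compiled into this file. It assumes the
// monitor typedefs in emi.h match the way the monitors are invoked in this
// file (start monitor: TInt(NThread*); exit monitor: void(NThread*)); the
// MyStartMonitor/MyExitMonitor/StartLogging names are hypothetical.
#if 0
static TInt MyStartMonitor(NThread* aThread)
	{
	// Mark the thread as loggable so its scheduling events are recorded.
	EMI::SetThreadLoggable(aThread, ETrue);
	return KErrNone;				// a negative value would make thread creation fail
	}

static void MyExitMonitor(NThread* aThread)
	{
	// Free any per-thread resources here, before the exit monitor completes.
	EMI::SetThreadLoggable(aThread, EFalse);
	}

static TInt StartLogging()
	{
	NKern::ThreadEnterCS();
	// 256-entry event log, scheduling event logging enabled.
	TInt r = EMI::TaskEventLogging(ETrue, 256, &MyStartMonitor, &MyExitMonitor);
	NKern::ThreadLeaveCS();
	return r;
	}
#endif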


/**
This is equivalent to calling the 4 parameter version of TaskEventLogging(),
but with the last two parameters remaining unchanged.

@param aLogging If true, scheduling event logging is enabled. If false,
scheduling event logging is disabled, but non-scheduling events
can still be logged. The initial state is false.

@param aLogSize The number of entries in the internal event log. If the event buffer
fills up, the oldest events are deleted to make way for new events, and the
"event lost" flag is set on the event following the deleted event. A
value of 0 indicates no event log, and no event logging is possible in this
state. If an event log already exists, and this value is a different
size, then all events in the old log will be lost.

Changing the event size may result in a memory allocation. On failure,
this function returns KErrNoMemory, with logging turned off, and no event
buffer. Monitors are left as they were before the call.

@return KErrNone on success; KErrNoMemory on allocation failure;
KErrNotSupported if EMI is not compiled into the kernel.

@see TaskEventLogging()
*/
EXPORT_C TInt EMI::TaskEventLogging(TBool aLogging, TInt aLogSize)
	{
	// Alloc/DeAlloc event log.
	// ------------------------
	// The event log is a ring buffer where iBufferStart is the first valid
	// record, and iBufferEnd is the last. There is a floating empty slot.
	// To allow for this, one more record than aLogSize is allocated.
	// iBufferTail = next record to be read from.
	// iBufferHead = next record to be written to.

	TInt oldSize;
	TAny* eventBuffer;

	if (TheScheduler.iBufferStart)
		oldSize = (((TInt) TheScheduler.iBufferEnd -
					(TInt) TheScheduler.iBufferStart) / sizeof(TTaskEventRecord));
	else
		oldSize = 0;

	if (aLogSize != oldSize)
		{
		// Disable logging
		TheScheduler.iLogging = EFalse;
		// Disable buffer use
		eventBuffer = TheScheduler.iBufferStart;
		NKern::Lock();
		TheScheduler.iBufferStart = NULL;
		Generation++;
		NKern::Unlock();

		if (oldSize > 0)
			Kern::Free(eventBuffer);

		if (aLogSize == 0)
			{
			aLogging = EFalse;

			TheScheduler.iBufferEnd = NULL;
			eventBuffer = NULL;
			}
		else if (aLogSize > 0)
			{
			aLogSize *= sizeof(TTaskEventRecord);
			eventBuffer = Kern::Alloc(aLogSize + sizeof(TTaskEventRecord));	// +1 empty slot
			if (eventBuffer != NULL)
				TheScheduler.iBufferEnd = (TAny*) ((TInt) eventBuffer + aLogSize);
			else
				{
				TheScheduler.iBufferEnd = NULL;
				TheScheduler.iLogging = EFalse;
				return KErrNoMemory;
				}
			TheScheduler.iBufferTail = eventBuffer;
			TheScheduler.iBufferHead = eventBuffer;
			}
		else
			{
			K::Fault(K::EBadLogSize);
			}

		// Re-enable buffer use
		TheScheduler.iBufferStart = eventBuffer;
		}

	// Set Logging
	// -----------

	TheScheduler.iLogging = aLogging;

	return KErrNone;
	}


/**
This is equivalent to calling the 4 parameter version of TaskEventLogging(),
but with the first two parameters remaining unchanged.

@param aStartMonitor A pointer to the new 'start monitor' function.

@param aExitMonitor A pointer to the new 'exit monitor' function.

@see TaskEventLogging()

@pre Calling thread must be in a critical section.
@pre No fast mutex can be held.
@pre Kernel must be unlocked.
@pre Interrupts must be enabled.
@pre Call in a thread context.
*/
EXPORT_C void EMI::ThreadMonitors(TThreadStartMonitor aStartMonitor, TThreadExitMonitor aExitMonitor)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"EMI::ThreadMonitors");
	TInt threadNo;

	// Clear loggable
	// --------------

	DObjectCon& threadList = *(K::Containers[EThread]);

	// If the monitors changed from the initial state,
	// iterate the thread list, unsetting the loggable flag.
	if ((StartMonitor == NULL) && (ExitMonitor == NULL)
		&& ((aStartMonitor != NULL) || (aExitMonitor != NULL)))
		{
		threadList.Wait();

		for (TInt i = threadList.Count()-1; i >= 0; i--)
			((DThread*) threadList[i])->iNThread.ModifyAttributes(KThreadAttLoggable, 0);

		threadList.Signal();
		}

	// Set Monitors
	// ------------

	if ((StartMonitor == aStartMonitor) || (aStartMonitor == NULL))
		{
		StartMonitor = aStartMonitor;
		ExitMonitor = aExitMonitor;
		if (aStartMonitor == NULL)
			{
			threadList.Wait();
			threadNo = threadList.Count();
			while (threadNo--)
				((DThread*) threadList[threadNo])->iEmiStartMonitor = NULL;
			threadList.Signal();
			}
		}
	else
		{
		TUint currentId = 0;
		TInt count;
		DThread* theThread;

		if (ExitMonitor == aExitMonitor)
			{
			StartMonitor = aStartMonitor;
			}
		else
			{
			// Newly created threads will use the new monitors,
			// but current threads can use the old exit monitor until
			// the new start monitor has been called.

			threadList.Wait();

			NKern::Lock();
			OldExitMonitor = ExitMonitor;
			for (threadNo = threadList.Count()-1; threadNo >= 0; threadNo--)
				{
				((DThread*) threadList[threadNo])->iEmiFlags |= KThdUseOldExit;
				NKern::PreemptionPoint();
				}
			StartMonitor = aStartMonitor;
			ExitMonitor = aExitMonitor;
			NKern::Unlock();

			// Unlocking and relocking acts as a preemption point.
			threadList.Signal();
			}

		// The Sigma thread
		StartMonitor(TheScheduler.iSigma);

		threadList.Wait();

		// Iterate the thread list, calling the start monitor.
		theThread = (DThread*) threadList[0];
		currentId = theThread->iId;
		threadNo = -1;
		FOREVER
			{
			if (theThread->iId != currentId)
				{
				// From the start, find the next thread with id > currentId.
				count = threadList.Count();
				for (threadNo = 0; threadNo < count; threadNo++)
					{
					theThread = (DThread*) threadList[threadNo];
					if (theThread->iId > currentId)
						break;
					}

				if (theThread->iId > currentId)		// if found,
					currentId = theThread->iId;
				else
					break;
				}
			else
				{
				// Move on to the next thread.
				threadNo++;
				if (threadNo >= threadList.Count())
					break;
				else
					{
					theThread = (DThread*) threadList[threadNo];
					currentId = theThread->iId;
					}
				}

			NKern::Lock();
			if (theThread->iEmiStartMonitor == (TAny*) StartMonitor)
				{
				theThread->iEmiFlags &= KThdEMIStateMask;	// Clear KThdUseOldExit
				NKern::Unlock();
				}
			else
				{
				if ((theThread->iEmiFlags & KThdEMIStateMask) != KThdRunning)
					{
					if ((theThread->iEmiFlags & KThdEMIStateMask) == KThdStarting)	// Old start monitor running
						theThread->iEmiStartMonitor = (TAny*) StartMonitor;

					theThread->iEmiFlags &= KThdEMIStateMask;	// Clear KThdUseOldExit
					NKern::Unlock();
					}
				else
					{
					// Use new exit monitor, monitor running.
					theThread->iEmiFlags = KThdRunning | KThdReStart;	// & !KThdUseOldExit
					NKern::Unlock();
					threadList.Signal();
					StartMonitor(&(theThread->iNThread));
					theThread->iEmiStartMonitor = (TAny*) StartMonitor;
					NKern::Lock();
					if ((theThread->iEmiFlags & KThdEMIStateMask) == KThdExiting)
						{	// It must be waiting for us to finish.
						ExitSemaphore->Signal();
						theThread->iEmiFlags = KThdExiting;		// Clear flags
						}
					else
						theThread->iEmiFlags = KThdRunning;		// Clear flags
					NKern::Unlock();
					threadList.Wait();

					// Resync threadNo with theThread, after the unlocked period.
					if (threadNo >= threadList.Count())		// count could drop
						threadNo = 0;
					theThread = (DThread*) threadList[threadNo];
					}
				}
			}

		OldExitMonitor = NULL;	// Transfer completed.
		threadList.Signal();
		}
	}


/**
@internalComponent
*/
void EMI::CallStartHandler(DThread* aDThread)
	{
	TThreadStartMonitor startMonitor;
	TInt result;

	NKern::ThreadEnterCS();
	NKern::Lock();
	aDThread->iEmiStartMonitor = (TAny*) StartMonitor;

	if (OldExitMonitor == ExitMonitor)
		aDThread->iEmiFlags |= KThdStarting;
	else
		aDThread->iEmiFlags = KThdStarting;

	do
		{
		startMonitor = (TThreadStartMonitor) aDThread->iEmiStartMonitor;
		NKern::Unlock();

		if (startMonitor != NULL)
			{
			result = startMonitor(&(aDThread->iNThread));
			if (result < 0)
				{
				NKern::ThreadLeaveCS();
				Kern::Exit(result);
				}
			}

		NKern::Lock();
		}
	while ((TAny*) startMonitor != aDThread->iEmiStartMonitor);

	aDThread->iEmiFlags &= KThdUseOldExit;	// Flag must be preserved, as a
	aDThread->iEmiFlags |= KThdRunning;		// different start monitor could be about to run.
											// If the loop above executed, the flag is unset.

	NKern::Unlock();
	NKern::ThreadLeaveCS();
	}


/**
@internalComponent
*/
void EMI::CallExitHandler(DThread* aDThread)
	{
	TTaskEventRecord* rec;
	TInt currentGeneration;
	TAny* nThread = (TAny*) &(aDThread->iNThread);
	TThreadExitMonitor exitMonitor;

	// Set thread non-loggable
	((NThread*) nThread)->ModifyAttributes(KThreadAttLoggable, 0);

	// Clear event log
	NKern::Lock();
	if (TheScheduler.iBufferStart != NULL)
		{
		currentGeneration = Generation;
		rec = (TTaskEventRecord*) TheScheduler.iBufferHead;

		while ((rec != TheScheduler.iBufferTail) && (currentGeneration == Generation))
			{
			rec--;
			if (rec < TheScheduler.iBufferStart)
				rec = (TTaskEventRecord*) TheScheduler.iBufferEnd;

			if (rec->iPrevious == nThread)
				rec->iPrevious = TheScheduler.iSigma;
			if (rec->iNext == nThread)
				rec->iNext = TheScheduler.iSigma;

			NKern::PreemptionPoint();
			}
		}

	if ((aDThread->iEmiFlags & KThdEMIStateMask) != KThdRunning)
		{
		// Thread died before it started running - don't call VEMs function.
		aDThread->iEmiFlags |= KThdExiting;
		NKern::Unlock();
		}
	else
		{
		aDThread->iEmiFlags |= KThdExiting;		// No further calls to start monitor.

		__ASSERT_COMPILE(KThdExiting == KThdEMIStateMask);

		if ((aDThread->iEmiFlags & KThdUseOldExit) != 0)
			exitMonitor = OldExitMonitor;
		else
			exitMonitor = ExitMonitor;

		if ((aDThread->iEmiFlags & KThdReStart) != 0)
			{	// monitor already running
			ExitSemaphore->SetOwner(NULL);
			ExitSemaphore->Wait();
			}

		NKern::Unlock();

		// Call VEMs function
		if (exitMonitor != NULL)
			exitMonitor(&(aDThread->iNThread));
		}
	}

/**
Fills the specified task event record from the event log, before the event is
deleted.

Note that if the event log becomes full, the addition of a further event (before any
subsequent event is retrieved) results in the oldest event in the buffer being
deleted. In this case, the next oldest event record will have its
"Events lost before this event" flag set to true. If bit 1 of the flags
("Previous thread now waiting") is set, it means that the previous thread
is blocked and is no longer ready to run. It could be waiting on a timer,
mutex, semaphore or similar. If this flag is false, it means that either
the thread's time slice expired, or it was pre-empted by a higher priority
thread.

@param aRecord The record to be filled.

@return EFalse, if the event queue is empty; ETrue, otherwise.

@see TTaskEventRecord

@pre Do not call from an ISR.
*/
EXPORT_C TBool EMI::GetTaskEvent(TTaskEventRecord& aRecord)
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR,"EMI::GetTaskEvent");
	NKern::Lock();
	if (TheScheduler.iBufferStart)
		{
		if (TheScheduler.iBufferTail != TheScheduler.iBufferHead)
			{	// if not empty
			aRecord = *((TTaskEventRecord*) TheScheduler.iBufferTail);
			TheScheduler.iBufferTail = ((TTaskEventRecord*) TheScheduler.iBufferTail) + 1;
			if (TheScheduler.iBufferTail > TheScheduler.iBufferEnd)
				TheScheduler.iBufferTail = TheScheduler.iBufferStart;
			NKern::Unlock();
			return ETrue;
			}
		}
	NKern::Unlock();
	return EFalse;
	}
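
// Illustrative sketch of draining the event log, not compiled into this file
// (the DrainEventLog helper is hypothetical):
#if 0
static void DrainEventLog()
	{
	TTaskEventRecord event;
	while (EMI::GetTaskEvent(event))
		{
		if (event.iFlags & KTskEvtFlag_EventLost)
			{
			// At least one older event was overwritten before this one.
			}
		if (event.iPrevious == (TAny*) EMI::GetIdleThread())
			{
			// The CPU was idling before this context switch.
			}
		// ... process event.iNext, event.iTime, event.iUserState ...
		}
	}
#endif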


/**
Allows custom events to be added into the event log.

See TTaskEventRecord for the record structure.

-# TTaskEventRecord::iType should be set to a value greater than 127.
-# Bit 0 of TTaskEventRecord::iFlags should be set to zero.
-# TTaskEventRecord::iUserState and TTaskEventRecord::iTime will be overridden
by the actual UserState and Time when the event is added to the event log.
-# TTaskEventRecord::iPrevious and TTaskEventRecord::iNext are intended to point
to NThreads, so that the helper tools can provide additional information about
the threads involved, but the fields can also be used to hold pointers to non-NThread objects.
If used in this alternative way, care should be taken to ensure that there is no address
collision with the NThread records in the system.

Note: Custom events follow the same rules as scheduling events, in that
the event could be deleted (due to buffer overrun), or the previous event
may be deleted, setting the "event lost" flag in this one. As a result such records should
never be relied upon, and no memory leakage or similar side effect should
occur if the events are lost.

Also, for events within the buffer, as with scheduling events, if the thread
indicated by iPrevious or iNext terminates, then this thread reference will be
removed (to prevent dereferencing of an invalid pointer) and replaced with a
pointer to the Sigma thread.

@param aRecord The record to be copied into the event log.

@return EFalse on failure, indicating no event buffer is allocated; ETrue, otherwise.

@see TTaskEventRecord

@pre Do not call from an ISR.
*/
EXPORT_C TBool EMI::AddTaskEvent(TTaskEventRecord& aRecord)
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR,"EMI::AddTaskEvent");
	NKern::Lock();
	if (TheScheduler.iBufferStart)
		{
		*((TTaskEventRecord*) TheScheduler.iBufferHead) = aRecord;
		((TTaskEventRecord*) TheScheduler.iBufferHead)->iUserState = TheScheduler.iEmiState;
		((TTaskEventRecord*) TheScheduler.iBufferHead)->iTime = NKern::FastCounter();

		TheScheduler.iBufferHead = ((TTaskEventRecord*) TheScheduler.iBufferHead) + 1;
		if (TheScheduler.iBufferHead > TheScheduler.iBufferEnd)
			TheScheduler.iBufferHead = TheScheduler.iBufferStart;

		if (TheScheduler.iBufferTail == TheScheduler.iBufferHead)
			{	// overflow, move on read pointer - event lost!
			TheScheduler.iBufferTail = ((TTaskEventRecord*) TheScheduler.iBufferTail) + 1;
			if (TheScheduler.iBufferTail > TheScheduler.iBufferEnd)
				TheScheduler.iBufferTail = TheScheduler.iBufferStart;

			((TTaskEventRecord*) TheScheduler.iBufferTail)->iFlags |= KTskEvtFlag_EventLost;
			}
		NKern::Unlock();
		return ETrue;
		}
	NKern::Unlock();
	return EFalse;
	}
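
// Illustrative sketch of logging a custom event, not compiled into this file
// (the LogCustomEvent helper and the type value 128 are hypothetical):
#if 0
static void LogCustomEvent(TAny* aContext)
	{
	TTaskEventRecord rec;
	rec.iType = 128;			// custom event types must be greater than 127
	rec.iFlags = 0;				// bit 0 must be zero
	rec.iPrevious = aContext;	// may be a non-NThread pointer, see the note above
	rec.iNext = aContext;
	// iUserState and iTime are overwritten by AddTaskEvent(), so they
	// need not be set here.
	EMI::AddTaskEvent(rec);
	}
#endif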

#ifdef __WINS__

/**
@internalComponent
*/
void EMI_AddTaskSwitchEvent(TAny* aPrevious, TAny* aNext)
	{
	if (TheScheduler.iLogging)
		{
		TUint8 state = ((NThread*) aPrevious)->iSpare1;	// iNState

		if ((((NThread*) aPrevious)->Attributes() & KThreadAttLoggable) == 0)
			aPrevious = TheScheduler.iSigma;
		if ((((NThread*) aNext)->Attributes() & KThreadAttLoggable) == 0)
			aNext = TheScheduler.iSigma;

		if (aPrevious != aNext)
			{
			TTaskEventRecord& rec = *((TTaskEventRecord*) TheScheduler.iBufferHead);

			rec.iType = 0;
			rec.iFlags = (state == NThread::EReady) ? (TUint8) 0 : KTskEvtFlag_PrevWaiting;
			rec.iUserState = TheScheduler.iEmiState;
			rec.iTime = NKern::FastCounter();
			rec.iPrevious = aPrevious;
			rec.iNext = aNext;

			TheScheduler.iBufferHead = ((TTaskEventRecord*) TheScheduler.iBufferHead) + 1;
			if (TheScheduler.iBufferHead > TheScheduler.iBufferEnd)
				TheScheduler.iBufferHead = TheScheduler.iBufferStart;

			if (TheScheduler.iBufferTail == TheScheduler.iBufferHead)
				{	// overflow, move on read pointer - event lost!
				TheScheduler.iBufferTail = ((TTaskEventRecord*) TheScheduler.iBufferTail) + 1;
				if (TheScheduler.iBufferTail > TheScheduler.iBufferEnd)
					TheScheduler.iBufferTail = TheScheduler.iBufferStart;

				((TTaskEventRecord*) TheScheduler.iBufferTail)->iFlags |= KTskEvtFlag_EventLost;
				}
			}
		}
	}

/**
@internalComponent
*/
void EMI_CheckDfcTag(TAny* aNext)
	{
	TUint result = ((NThread*) aNext)->iTag & TheScheduler.iEmiMask;

	if (result != 0)
		{
		TheScheduler.iEmiDfcTrigger |= result;
		TheScheduler.iEmiDfc->Add();
		}
	}


#endif



/**
Gets a pointer to the idle thread.

This can be compared with an NThread pointer returned from
EMI::GetTaskEvent() to determine whether the system was idling.

@return A pointer to the idle thread.
*/
EXPORT_C NThread* EMI::GetIdleThread()
	{
	return &(K::TheNullThread->iNThread);
	}



/**
Gets a pointer to the Sigma thread.

This can be compared with an NThread pointer returned from
EMI::GetTaskEvent() to determine whether the system was busy
with activity associated with the Sigma thread.

@return A pointer to the Sigma thread.
*/
EXPORT_C NThread* EMI::GetSigmaThread()
	{
	return TheScheduler.iSigma;
	}



/**
Sets energy management scheme (VEMS) data into the specified thread.

This function allows implementors of energy management schemes to store
any data associated with the thread. The field is used solely by scheme
implementors, and has no restriction in use.

Note that this data must be cleaned up before the 'exit monitor' completes.

The function sets the NThread::iVemsData field to the value of the aVemsData pointer.

@param aThread The thread to be altered.
@param aVemsData The pointer to be set into the NThread::iVemsData field.

@see EMI::GetThreadVemsData()
*/
EXPORT_C void EMI::SetThreadVemsData(NThread* aThread, TAny* aVemsData)
	{
	aThread->iVemsData = aVemsData;
	}



/**
Gets energy management scheme (VEMS) data from the specified thread.

The function returns the NThread::iVemsData field.

@param aThread The thread whose data is to be queried.

@return The pointer held in the NThread::iVemsData field.

@see EMI::SetThreadVemsData()
*/
EXPORT_C TAny* EMI::GetThreadVemsData(NThread* aThread)
	{
	return aThread->iVemsData;
	}
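
// Illustrative sketch of attaching per-thread scheme data from the monitors,
// not compiled into this file (SThreadAccounting and the monitor names are
// hypothetical):
#if 0
struct SThreadAccounting { TUint32 iRunCount; };

static TInt AccountingStartMonitor(NThread* aThread)
	{
	SThreadAccounting* data = new SThreadAccounting;
	if (!data)
		return KErrNoMemory;		// failing here makes thread creation fail
	data->iRunCount = 0;
	EMI::SetThreadVemsData(aThread, data);
	EMI::SetThreadLoggable(aThread, ETrue);
	return KErrNone;
	}

static void AccountingExitMonitor(NThread* aThread)
	{
	// The data must be cleaned up before the exit monitor completes.
	delete (SThreadAccounting*) EMI::GetThreadVemsData(aThread);
	EMI::SetThreadVemsData(aThread, NULL);
	}
#endif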



/**
Sets the loggable attribute in the specified NThread.

If this field is true then
scheduling events involving this thread will be logged, and may be discovered
with GetTaskEvent(). This is used to protect the system from trying to
interact with a partially defined DThread. This will be initialised to
false, and set back to false during deletion of the thread. The energy management scheme (VEMS)
can set this to true from the 'start monitor', after any associated data has
been initialised. During any phase in which the loggable attribute is false,
the thread's activity will be considered to be Sigma thread activity. This
behaviour prevents partially defined DThreads' activity appearing in the
event log before the thread apparently started or after it terminated. The
Sigma thread's Loggable attribute is ignored, and is treated as if always
being true.

@param aThread The thread to be altered.
@param aLoggable Value for the loggable flag.

@see EMI::GetThreadLoggable()
*/
EXPORT_C void EMI::SetThreadLoggable(NThread* aThread, TBool aLoggable)
	{
	if (aLoggable)
		aThread->ModifyAttributes(0, KThreadAttLoggable);
	else
		aThread->ModifyAttributes(KThreadAttLoggable, 0);
	}



/**
Gets the state of the Loggable attribute in the NThread.

@param aThread The thread to be queried.

@return The state of the Loggable attribute in the NThread.

@see EMI::SetThreadLoggable()
*/
EXPORT_C TBool EMI::GetThreadLoggable(NThread* aThread)
	{
	return (aThread->Attributes() & KThreadAttLoggable) != 0;
	}



/**
Sets the NThread::iTag field in the specified NThread.

This field is ANDed with the scheduler's EMI mask when the thread is scheduled.
A nonzero result will trigger a DFC to be scheduled.

@param aThread The thread to be altered.
@param aTag The tag to be set.

@see EMI::SetDfc()
@see EMI::SetMask()
@see EMI::GetMask()
@see EMI::GetThreadTag()
*/
EXPORT_C void EMI::SetThreadTag(NThread* aThread, TUint32 aTag)
	{
	aThread->iTag = aTag;
	}



/**
Gets the NThread::iTag field from the specified NThread.

This tag is ANDed with the scheduler's EMI mask when the thread is scheduled.
A nonzero result will trigger a DFC to be scheduled.

@param aThread The thread to be queried.

@return The value of the NThread::iTag field.

@see EMI::SetDfc()
@see EMI::SetMask()
@see EMI::GetMask()
@see EMI::SetThreadTag()
*/
EXPORT_C TUint32 EMI::GetThreadTag(NThread* aThread)
	{
	return aThread->iTag;
	}



/**
Sets the scheduler's EMI mask.

During scheduling, this mask is ANDed with the NThread::iTag field in the NThread
about to be scheduled. If the result is nonzero, a DFC is scheduled. This
function can be called anywhere and does not need to be called from a
critical section.

@param aMask The mask to be used.

@see EMI::SetDfc()
@see EMI::SetThreadTag()
@see EMI::GetThreadTag()
@see EMI::GetMask()
*/
EXPORT_C void EMI::SetMask(TUint32 aMask)
	{
	TheScheduler.iEmiMask = aMask;
	}



/**
Gets the scheduler's EMI mask.

During scheduling, this mask is ANDed with the NThread::iTag field in the NThread
about to be scheduled. If the result is nonzero, a DFC is scheduled. This
function can be called anywhere and does not need to be called from a
critical section.

@return The mask value.

@see EMI::SetDfc()
@see EMI::SetThreadTag()
@see EMI::GetThreadTag()
@see EMI::SetMask()
*/
EXPORT_C TUint32 EMI::GetMask()
	{
	return TheScheduler.iEmiMask;
	}



/**
Sets the scheduler's EMI mask, and the DFC to be scheduled.

During scheduling, this mask is ANDed with the NThread::iTag field in the NThread
about to be scheduled. If the result is nonzero, then the specified DFC
is scheduled. This method may be called anywhere and does not need to be
called from a critical section.

@param aDfc The DFC to be used.
@param aMask The mask to be used.

@see EMI::SetThreadTag()
@see EMI::SetMask()
*/
EXPORT_C void EMI::SetDfc(TDfc* aDfc, TUint32 aMask)
	{
	TheScheduler.iEmiDfc = aDfc;
	TheScheduler.iEmiMask = aMask;
	}



/**
Returns the logical AND of the NThread::iTag field of the thread which triggered the
DFC with the mask prevailing at the time.

As is normal with DFCs, if more than one DFC triggering event occurs before the DFC gets
to run, then only one DFC will actually run. In this case, this function returns the logical OR of
the corresponding values for all the events. The accumulated trigger tag is
cleared by this function, so calling it twice in succession will return zero
the second time.

@return The tag bits that caused the last DFC.
*/
EXPORT_C TUint32 EMI::GetDfcTriggerTag()
	{
	return __e32_atomic_swp_ord32(&TheScheduler.iEmiDfcTrigger, 0);
	}
|
|
977 |
|
|
978 |
|
|
979 |
|
|
980 |
/**
|
|
981 |
Sets the user state variable, as reported in the event log.
|
|
982 |
|
|
983 |
It is expected that this variable will be used to remember the clock
|
|
984 |
frequency at the time when the event was logged. This method may be called
|
|
985 |
anywhere and does not need to be called from a critical section.
|
|
986 |
|
|
987 |
@param aState The new user state value.
|
|
988 |
|
|
989 |
@see EMI::GetState()
|
|
990 |
*/
|
|
991 |
EXPORT_C void EMI::SetState(TUint32 aState)
|
|
992 |
{
|
|
993 |
TheScheduler.iEmiState = aState;
|
|
994 |
}
|
|
995 |
|
|
996 |
|
|
997 |
|
|
998 |
|
|
999 |
/**
|
|
1000 |
Gets the user state variable, as reported in the event log.
|
|
1001 |
|
|
1002 |
This method may be called anywhere and does not need to be called from a critical section.
|
|
1003 |
|
|
1004 |
@return The user state variable.
|
|
1005 |
|
|
1006 |
@see EMI::SetState()
|
|
1007 |
*/
|
|
1008 |
EXPORT_C TUint32 EMI::GetState()
|
|
1009 |
{
|
|
1010 |
return TheScheduler.iEmiState;
|
|
1011 |
}
|
|
1012 |
|
|
1013 |
|
|
1014 |
|
|
1015 |
|
|
1016 |
/**
|
|
1017 |
@internalComponent
|
|
1018 |
*/
|
|
1019 |
void AfterIdleCallback(TAny* aSem)
|
|
1020 |
{
|
|
1021 |
NKern::Lock();
|
|
1022 |
EMI::AfterIdleState = EMI::ENone;
|
|
1023 |
((NFastSemaphore*) aSem)->Signal();
|
|
1024 |
//release semaphore
|
|
1025 |
NKern::Unlock();
|
|
1026 |
}
|
|
1027 |
|
|
1028 |
|
|
1029 |
|
|
1030 |
|
|
1031 |
/**
|
|
1032 |
This call waits for the time interval aDelay (specified in milliseconds)
|
|
1033 |
to pass, unless the CPU goes idle following the call with only one event in
|
|
1034 |
the queue, representing the context switch from the current thread to the idle
|
|
1035 |
thread. In this case the timer is paused before the CPU goes idle and resumed
|
|
1036 |
when it wakes up.
|
|
1037 |
|
|
1038 |
For this to function correctly, EMI::EnterIdle() and EMI::LeaveIdle() must be called by
|
|
1039 |
the base port from the idle function.
|
|
1040 |
|
|
1041 |
@param aDelay The time interval to be delayed.
|
|
1042 |
|
|
1043 |
@pre No fast mutex can be held.
|
|
1044 |
@pre Call in a thread context.
|
|
1045 |
@pre Kernel must be unlocked
|
|
1046 |
@pre interrupts enabled
|
|
1047 |
*/
|
|
1048 |
EXPORT_C void EMI::AfterIdle(TInt aDelay)
|
|
1049 |
{
|
|
1050 |
CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"EMI::AfterIdle");
|
|
1051 |
NFastSemaphore mySem;
|
|
1052 |
// Set new timer, callback to release semephore
|
|
1053 |
NTimer myTimer(&AfterIdleCallback, &mySem);
|
|
1054 |
NKern::Lock();
|
|
1055 |
myTimer.OneShot(aDelay, ETrue);
|
|
1056 |
// Set AfterIdle Timer pointer to point at new timer. Mark as runnung.
|
|
1057 |
// (If this is running, any existing AfterIdele call should act as normall timer)
|
|
1058 |
AfterIdleState = EWaiting;
|
|
1059 |
AfterIdleTimer = &myTimer;
|
|
1060 |
AfterIdleDelay = aDelay;
|
|
1061 |
|
|
1062 |
// Wait for Timer to release semaphore.
|
|
1063 |
mySem.SetOwner(NULL);
|
|
1064 |
mySem.Wait();
|
|
1065 |
NKern::Unlock();
|
|
1066 |
}
|
|


/**
This function must be called by the base port at the start of the idle
function, just after interrupts have been disabled.
*/
EXPORT_C void EMI::EnterIdle()
	{
	if (AfterIdleState == EWaiting)
		{
		if (AfterIdleTimer->iState > NTimer::EIdle)
			{
			TInt sizeDelay = (TInt) TheScheduler.iBufferHead - (TInt) TheScheduler.iBufferTail;

			if (sizeDelay && TheScheduler.iLogging)
				sizeDelay = ((sizeDelay != sizeof(TTaskEventRecord)) &&
							((TheScheduler.iBufferHead != TheScheduler.iBufferStart)
							|| (TheScheduler.iBufferTail != TheScheduler.iBufferEnd)));

			if (!sizeDelay)
				{
				AfterIdleTimer->Cancel();
				AfterIdleState = EHeld;
				}
			else
				AfterIdleState = ENone;
			}
		else
			AfterIdleState = ENone;
		}
	}

/**
This function must be called by the base port at the end of the idle function,
just before interrupts are re-enabled.
*/
EXPORT_C void EMI::LeaveIdle()
	{
	if (AfterIdleState == EHeld)
		{
		AfterIdleTimer->OneShot(AfterIdleDelay, ETrue);
		AfterIdleState = ENone;
		}
	}
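
// Illustrative sketch of a base port idle hook wrapping the wait-for-interrupt
// with EnterIdle()/LeaveIdle(), as required by AfterIdle() above. Not compiled
// into this file; the MyAsic class and the wait-for-interrupt step are
// platform-specific assumptions.
#if 0
void MyAsic::Idle()
	{
	TInt irq = NKern::DisableAllInterrupts();
	EMI::EnterIdle();		// may pause a pending AfterIdle() timer
	// ... platform-specific wait-for-interrupt goes here ...
	EMI::LeaveIdle();		// restarts the AfterIdle() timer if it was held
	NKern::RestoreInterrupts(irq);
	}
#endif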

#else // EMI not supported
// EMI stubs - for compatibility

EXPORT_C TInt EMI::TaskEventLogging(TBool, TInt, TThreadStartMonitor, TThreadExitMonitor)
	{ return KErrNotSupported; }
EXPORT_C TInt EMI::TaskEventLogging (TBool, TInt) { return KErrNotSupported; }
EXPORT_C void EMI::ThreadMonitors (TThreadStartMonitor, TThreadExitMonitor) {}
EXPORT_C TBool EMI::GetTaskEvent (TTaskEventRecord&) { return EFalse; }
EXPORT_C TBool EMI::AddTaskEvent (TTaskEventRecord&) { return EFalse; }
EXPORT_C NThread* EMI::GetIdleThread () { return NULL; }
EXPORT_C NThread* EMI::GetSigmaThread () { return NULL; }
EXPORT_C void EMI::SetThreadVemsData (NThread*, TAny*) {}
EXPORT_C TAny* EMI::GetThreadVemsData (NThread*) { return NULL; }
EXPORT_C void EMI::SetThreadLoggable (NThread*, TBool) {}
EXPORT_C TBool EMI::GetThreadLoggable (NThread*) { return EFalse; }
EXPORT_C void EMI::SetThreadTag (NThread*, TUint32) {}
EXPORT_C TUint32 EMI::GetThreadTag (NThread*) { return 0; }
EXPORT_C void EMI::SetMask (TUint32) {}
EXPORT_C TUint32 EMI::GetMask () { return 0; }
EXPORT_C void EMI::SetDfc (TDfc*, TUint32) {}
EXPORT_C TUint32 EMI::GetDfcTriggerTag () { return 0; }
EXPORT_C void EMI::SetState (TUint32) {}
EXPORT_C TUint32 EMI::GetState () { return 0; }
EXPORT_C void EMI::AfterIdle (TInt) {}
EXPORT_C void EMI::EnterIdle () {}
EXPORT_C void EMI::LeaveIdle () {}

#endif