// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\nkerns.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#include <e32cmn.h>
#include <e32cmn_private.h>
#include "nk_priv.h"

extern "C" void ExcFault(TAny*);

/******************************************************************************
 * Fast mutex
 ******************************************************************************/
/** Create a fast mutex

	@publishedPartner
	@released
*/
EXPORT_C NFastMutex::NFastMutex()
	// iHoldingThread starts at 0 (presumably meaning "not held" — confirm
	// against the lock/unlock paths); the spinlock is created with the
	// EOrderFastMutex lock-order rank.
	: iHoldingThread(0), iMutexLock(TSpinLock::EOrderFastMutex)
	{
	}

/******************************************************************************
 * NSchedulable
 ******************************************************************************/
// Construct a schedulable entity with all scheduling state cleared: not
// ready, not current, no affinity, no pause/suspend/freeze flags.
NSchedulable::NSchedulable()
	: iSSpinLock(TSpinLock::EOrderThread)
	{
	iPriority = 0;
	iReady = 0;			// not on any ready list
	iCurrent = 0;		// not currently executing
	iLastCpu = 0;
	iNSchedulableSpare1 = 0;
	iPauseCount = 0;
	iSuspended = 0;
	iNSchedulableSpare2 = 0;
	iCpuChange = 0;
	iStopping = 0;
	iFreezeCpu = 0;
	// Poison value — iParent must be set to a real value before use
	// (NThreadBase sets it to 'this', NThreadBase::Create may re-point it
	// at a group); the pattern makes premature use obvious in a crash dump.
	iParent = (NSchedulable*)0xdeadbeef;
	iCpuAffinity = 0;
	// Placement-construct a TDfc in the embedded buffer; its callback is
	// DeferredReadyIDfcFn with this object as argument.
	new (i_IDfcMem) TDfc(&DeferredReadyIDfcFn, this);
	iEventState = 0;
	iTotalCpuTime64 = 0;	// clears both halves of the CPU-time counter
	}

/******************************************************************************
 * NThreadGroup
 ******************************************************************************/
// Construct an empty thread group.
NThreadGroup::NThreadGroup()
	{
	iParent = 0;
	iThreadCount = 0;
	// Re-run the spinlock constructor in place: the NSchedulable constructor
	// built iSSpinLock with rank EOrderThread, but a group must use the
	// EOrderThreadGroup lock-order rank.
	new (&iSSpinLock) TSpinLock(TSpinLock::EOrderThreadGroup);
	}

74 |
/** Create a thread group
|
|
75 |
|
|
76 |
@publishedPartner
|
|
77 |
@prototype
|
|
78 |
*/
|
|
79 |
EXPORT_C TInt NKern::GroupCreate(NThreadGroup* aGroup, SNThreadGroupCreateInfo& aInfo)
|
|
80 |
{
|
|
81 |
new (aGroup) NThreadGroup();
|
|
82 |
aGroup->iCpuAffinity = aInfo.iCpuAffinity;
|
|
83 |
return KErrNone;
|
|
84 |
}
|
|
85 |
|
|
86 |
|
|
/** Destroy a thread group

	@pre Call in thread context, interrupts enabled, preemption enabled
	@pre No fast mutex held
	@pre Calling thread in critical section
	@pre All threads have left the group

	@publishedPartner
	@prototype
*/
EXPORT_C void NKern::GroupDestroy(NThreadGroup* aGroup)
	{
	// Detach any events tied to the group while inside a critical section
	// so the detach cannot be interrupted by thread termination.
	NKern::ThreadEnterCS();
	aGroup->DetachTiedEvents();
	NKern::ThreadLeaveCS();
	}


/******************************************************************************
 * Thread
 ******************************************************************************/
// Handler installed in DefaultSlowExecTable below; reached when a thread
// makes an executive call but no valid table entry exists. Faults the system.
void InvalidExec()
	{
	FAULT();
	}

// Default executive-call tables used when a thread supplies none
// (see NThreadBase::Create). The fast table is empty; the slow table's
// invalid-call handler is InvalidExec, which faults the system.
static const SFastExecTable DefaultFastExecTable={0,{0}};
static const SSlowExecTable DefaultSlowExecTable={0,(TLinAddr)InvalidExec,0,{{0,0}}};

// Default handler set, installed by NThreadBase::Create when the creation
// info supplies no iHandlers.
const SNThreadHandlers NThread_Default_Handlers =
	{
	NTHREAD_DEFAULT_EXIT_HANDLER,
	NTHREAD_DEFAULT_STATE_HANDLER,
	NTHREAD_DEFAULT_EXCEPTION_HANDLER,
	NTHREAD_DEFAULT_TIMEOUT_HANDLER
	};

// Construct an idle wait state: the embedded timer's callback is
// TimerExpired with this wait state as its argument.
NThreadWaitState::NThreadWaitState()
	: iTimer(&TimerExpired, this)
	{
	iWtSt64 = 0;	// clears the whole 64-bit wait-state word in one write
	iTimer.iTriggerTime = 0;
	iTimer.iNTimerSpare1 = 0;
	}

// Construct a nanothread control block with everything zeroed/unset.
// Real configuration (stack, priority, exec tables, handlers) happens later
// in NThreadBase::Create().
NThreadBase::NThreadBase()
	: iRequestSemaphore(), iWaitState()
	{
	iParent = this;		// standalone thread: it is its own schedulable parent
	iWaitLink.iPriority = 0;
	iBasePri = 0;
	iMutexPri = 0;
	i_NThread_Initial = 0;
	iLinkedObjType = EWaitNone;
	i_ThrdAttr = 0;
	iNThreadBaseSpare10 = 0;
	iFastMutexDefer = 0;
	// The thread's request semaphore is owned by the thread itself.
	iRequestSemaphore.iOwningThread = (NThreadBase*)this;
	iTime = 0;
	iTimeslice = 0;
	iSavedSP = 0;
	iAddressSpace = 0;
	iHeldFastMutex = 0;
	iUserModeCallbacks = 0;	// empty callback list (see NKern::*UserModeCallbacks)
	iLinkedObj = 0;
	iNewParent = 0;
	iFastExecTable = 0;
	iSlowExecTable = 0;
	iCsCount = 0;
	iCsFunction = 0;
	iHandlers = 0;
	iSuspendCount = 0;
	iStackBase = 0;
	iStackSize = 0;
	iExtraContext = 0;
	iExtraContextSize = 0;
	iNThreadBaseSpare6 = 0;
	iNThreadBaseSpare7 = 0;
	iNThreadBaseSpare8 = 0;
	iNThreadBaseSpare9 = 0;

	// KILL
	iTag = 0;
	iVemsData = 0;
	}

// Second-phase construction of a nanothread.
//
// @param aInfo    Stack, priority, timeslice, affinity, exec tables, handlers.
// @param aInitial ETrue only for the per-CPU initial (null) thread, which is
//                 created already running on the current CPU.
// @return KErrNone, or KErrArgument for an out-of-range priority.
TInt NThreadBase::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NThreadBase::Create %08x(%08x,%d)", this, &aInfo, aInitial));
	// Valid priorities are 0..63; priority 0 is reserved for the initial
	// (null) thread only.
	if (aInfo.iPriority<0 || aInfo.iPriority>63)
		return KErrArgument;
	if (aInfo.iPriority==0 && !aInitial)
		return KErrArgument;
//	if (aInfo.iCpu!=KCpuAny && aInfo.iCpu>=TheScheduler.iNumCpus)
//		return KErrArgument;
	iStackBase=(TLinAddr)aInfo.iStackBase;
	iStackSize=aInfo.iStackSize;
	// Timeslice <= 0 in the creation info means "not timesliced" (-1).
	iTimeslice=(aInfo.iTimeslice>0)?aInfo.iTimeslice:-1;
	iTime=iTimeslice;
	iPriority=TUint8(aInfo.iPriority);
	iBasePri=TUint8(aInfo.iPriority);
	iCpuAffinity = aInfo.iCpuAffinity;
	// Fall back to the module-level defaults when no tables/handlers given.
	iHandlers = aInfo.iHandlers ? aInfo.iHandlers : &NThread_Default_Handlers;
	iFastExecTable=aInfo.iFastExecTable?aInfo.iFastExecTable:&DefaultFastExecTable;
	iSlowExecTable=(aInfo.iSlowExecTable?aInfo.iSlowExecTable:&DefaultSlowExecTable)->iEntries;
	i_ThrdAttr=(TUint8)aInfo.iAttributes;
	if (aInitial)
		{
		// Initial thread: mark it as already current/ready on this CPU,
		// pin it there, and register it with the sub-scheduler.
		TSubScheduler& ss = SubScheduler();
		iLastCpu = (TUint8)ss.iCpuNum;
		iReady = (TUint8)(iLastCpu | EReadyOffset);
		iCurrent = iReady;
		iCpuAffinity = iLastCpu;
		iEventState = (iLastCpu<<EEventCpuShift) | (iLastCpu<<EThreadCpuShift);
		ss.Add(this);
		i_NThread_Initial = TRUE;
		ss.iInitialThread = (NThread*)this;
		NKern::Unlock();	// now that current thread is defined
		}
	else
		{
		// Normal thread: created suspended.
		iSuspendCount = 1;
		iSuspended = 1;
		// Derive the event CPU from the affinity: for an affinity mask pick
		// the lowest set bit, otherwise the affinity value is the CPU number.
		TInt ecpu;
		if (iCpuAffinity & NTHREADBASE_CPU_AFFINITY_MASK)
			{
			ecpu = __e32_find_ls1_32(iCpuAffinity);
			if (ecpu >= TheScheduler.iNumCpus)
				ecpu = 0;	// FIXME: Inactive CPU?
			}
		else
			ecpu = iCpuAffinity;
		iEventState = (ecpu<<EEventCpuShift) | (ecpu<<EThreadCpuShift);
		if (aInfo.iGroup)
			{
			// Join the requested group under the kernel lock, taking the
			// thread's and then the group's spinlocks.
			// NOTE(review): the group's lock appears to be released by
			// RelSLock() along with the thread's — confirm against RelSLock.
			NKern::Lock();
			AcqSLock();
			aInfo.iGroup->AcqSLock();
			iParent = (NSchedulable*)aInfo.iGroup;
			++aInfo.iGroup->iThreadCount;
			iEventState |= EEventParent;
			RelSLock();
			NKern::Unlock();
			}
		}
	__KTRACE_OPT(KNKERN,DEBUGPRINT("<NThreadBase::Create OK"));
	return KErrNone;
	}

// Default NState handler: reached only for an unrecognised thread state or
// operation. Prints diagnostics in debug builds, then faults the system.
void NThread_Default_State_Handler(NThread* __DEBUG_ONLY(aThread), TInt __DEBUG_ONLY(aOperation), TInt __DEBUG_ONLY(aParameter))
	{
//	__KTRACE_OPT(KPANIC,DEBUGPRINT("Unknown NState %d: thread %T op %08x par %08x",aThread,aThread->iNState,aOperation,aParameter));
#ifdef _DEBUG
	DEBUGPRINT("UnknownState: thread %T op %08x par %08x",aThread,aOperation,aParameter);
#endif
	FAULT();
	}

// Default exception handler: delegate straight to the system fault routine
// with the saved exception context.
void NThread_Default_Exception_Handler(TAny* aContext, NThread*)
	{
	ExcFault(aContext);
	}


/** Create a nanothread.

	This function is intended to be used by the EPOC kernel and by personality
	layers. A nanothread may not use most of the functions available to normal
	Symbian OS threads. Use Kern::ThreadCreate() to create a Symbian OS thread.

	@param aThread Pointer to control block for thread to create.
	@param aInfo Information needed for creating the thread.

	@return KErrNone on success, otherwise an error from NThreadBase::Create().

	@see SNThreadCreateInfo
	@see Kern::ThreadCreate

	@pre Call in a thread context.
	@pre Interrupts must be enabled.
	@pre Kernel must be unlocked.
*/
EXPORT_C TInt NKern::ThreadCreate(NThread* aThread, SNThreadCreateInfo& aInfo)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadCreate");
	// FALSE => a normal (non-initial) thread; it is created suspended.
	return aThread->Create(aInfo,FALSE);
	}

// User-mode callbacks

// Construct an unqueued callback: iNext holds the sentinel
// KUserModeCallbackUnqueued until the callback is added to a thread's list.
TUserModeCallback::TUserModeCallback(TUserModeCallbackFunc aFunc)
	: iNext(KUserModeCallbackUnqueued),
	  iFunc(aFunc)
	{
	}

// A callback must have been dequeued (iNext reset to the sentinel) before it
// may be destroyed; destroying a still-queued callback would corrupt the list.
TUserModeCallback::~TUserModeCallback()
	{
	__NK_ASSERT_DEBUG(iNext == KUserModeCallbackUnqueued);
	}

// Cancel all user-mode callbacks queued on the current thread.
void NKern::CancelUserModeCallbacks()
	{
	// Call any queued callbacks with the EUserModeCallbackCancel reason code, in the current
	// thread.

	// Atomically detach the whole list so concurrent queuers cannot race us.
	TUserModeCallback* listHead =
		(TUserModeCallback*)__e32_atomic_swp_ord_ptr(&NCurrentThread()->iUserModeCallbacks, NULL);
	while (listHead)
		{
		TUserModeCallback* callback = listHead;
		listHead = listHead->iNext;
		// Mark the callback unqueued BEFORE invoking it; the barrier ensures
		// the iNext reset is visible before iFunc may requeue/destroy it.
		callback->iNext = KUserModeCallbackUnqueued;
		__e32_memory_barrier();
		callback->iFunc(callback, EUserModeCallbackCancel);
		}
	}

// Transfer all queued user-mode callbacks from aSrcThread to aDestThread and
// permanently block further queuing on the source thread.
void NKern::MoveUserModeCallbacks(NThreadBase* aDestThread, NThreadBase* aSrcThread)
	{
	// Move all queued user-mode callbacks from the source thread to the destination thread, and
	// prevent any more from being queued. Used by the kernel thread code so that callbacks get
	// cancelled in another thread if the thread they were originally queued on dies.

	// Atomically remove list of callbacks and set pointer to 1
	// The latter ensures any subsequent attempts to add callbacks fail
	TUserModeCallback* sourceListStart =
		(TUserModeCallback*)__e32_atomic_swp_ord_ptr(&aSrcThread->iUserModeCallbacks, (TAny*)1);
	__NK_ASSERT_DEBUG(((TUint)sourceListStart & 3) == 0); // check this only gets called once per thread

	if (sourceListStart == NULL)
		return;

	// Find the tail of the source list so it can be spliced in one CAS.
	TUserModeCallback* sourceListEnd = sourceListStart;
	while (sourceListEnd->iNext != NULL)
		sourceListEnd = sourceListEnd->iNext;

	// Splice the source list onto the front of the destination list; retry
	// the CAS until no concurrent modification intervenes.
	NKern::Lock();
	TUserModeCallback* destListStart = aDestThread->iUserModeCallbacks;
	do
		{
		__NK_ASSERT_DEBUG(((TUint)destListStart & 3) == 0); // dest thread must not die
		sourceListEnd->iNext = destListStart;
		} while (!__e32_atomic_cas_ord_ptr(&aDestThread->iUserModeCallbacks, &destListStart, sourceListStart));
	NKern::Unlock();
	}

/** Initialise the null thread
	@internalComponent
*/
void NKern::Init(NThread* aThread, SNThreadCreateInfo& aInfo)
	{
	// Force the fields that the null thread must have, regardless of what
	// the caller passed in, then create it as the initial thread (TRUE).
	aInfo.iFunction=NULL;			// irrelevant
	aInfo.iPriority=0;				// null thread has lowest priority
	aInfo.iTimeslice=0;				// null thread not timesliced
	aInfo.iAttributes=0;			// null thread does not require implicit locks
	aThread->Create(aInfo,TRUE);	// create the null thread
	}

/** @internalTechnology */
// Interrupt-latency instrumentation hook; deliberately a no-op in this build.
EXPORT_C void NKern::RecordIntLatency(TInt /*aLatency*/, TInt /*aIntMask*/)
	{
	}


/** @internalTechnology */
// Thread-latency instrumentation hook; deliberately a no-op in this build.
EXPORT_C void NKern::RecordThreadLatency(TInt /*aLatency*/)
	{
	}

/********************************************
 * Deterministic Priority List Implementation
 ********************************************/


/** Construct a priority list with the specified number of priorities

	@param aNumPriorities The number of priorities (must be 1-64).
*/
EXPORT_C TPriListBase::TPriListBase(TInt aNumPriorities)
	{
	// Zero the header plus one queue pointer per priority. The
	// (aNumPriorities-1) term implies sizeof(TPriListBase) already includes
	// one SDblQueLink* — presumably a trailing one-element array; confirm
	// against the TPriListBase declaration.
	memclr(this, sizeof(TPriListBase)+(aNumPriorities-1)*sizeof(SDblQueLink*) );
	}

370 |
/********************************************
|
|
371 |
* Miscellaneous
|
|
372 |
********************************************/
|
|
373 |
|
|
374 |
/** Get the current value of the high performance counter.
|
|
375 |
|
|
376 |
If a high performance counter is not available, this uses the millisecond
|
|
377 |
tick count instead.
|
|
378 |
*/
|
|
379 |
EXPORT_C TUint32 NKern::FastCounter()
|
|
380 |
{
|
|
381 |
return (TUint32)Timestamp();
|
|
382 |
}
|
|
383 |
|
|
384 |
|
|
385 |
/** Get the frequency of counter queried by NKern::FastCounter().
|
|
386 |
*/
|
|
387 |
EXPORT_C TInt NKern::FastCounterFrequency()
|
|
388 |
{
|
|
389 |
return (TInt)TimestampFrequency();
|
|
390 |
}
|
|
391 |
|
|
392 |
|
|
extern "C" {
// Crash flag read by NKern::Crashed(); C linkage so it can be set from
// assembler / the crash path (set elsewhere — not in this file).
TUint32 CrashState;
}

/** Report whether the system has crashed.
	@return ETrue if CrashState is nonzero, EFalse otherwise.
*/
EXPORT_C TBool NKern::Crashed()
	{
	return CrashState!=0;
	}

/** Returns number of nanokernel timer ticks since system started.
	@return tick count
	@pre any context
*/
EXPORT_C TUint32 NKern::TickCount()
	{
	return NTickCount();
	}

// Identifier tying together the parts of a multipart trace; atomically
// incremented per big trace in BTrace::DoOutBig() and passed to the handler
// with every fragment.
TUint32 BTrace::BigTraceId = 0;

// Emit a trace whose payload may exceed one record.
//
// @param a0        Packed header word (size is added in, flag bits OR'd in).
// @param a1        Second header word; reused as the running offset for
//                  middle/last fragments of a multipart trace.
// @param aData     Payload; NOTE this pointer is reinterpreted in place —
//                  payloads of <= 4 bytes are smuggled by value through the
//                  pointer itself rather than dereferenced by the handler.
// @param aDataSize Payload length in bytes.
// @return result of the installed handler (FALSE aborts remaining parts).
TBool BTrace::DoOutBig(TUint32 a0, TUint32 a1, const TAny* aData, TInt aDataSize, TUint32 aContext, TUint32 aPc)
	{
	SBTraceData& traceData = BTraceData;

	// see if trace is small enough to fit in single record...
	if(TUint(aDataSize)<=TUint(KMaxBTraceDataArray+4))
		{
		a0 += aDataSize;
		TUint32 a2 = 0;
		TUint32 a3 = 0;
		if(aDataSize)
			{
			a2 = *((TUint32*&)aData)++; // first 4 bytes into a2
			if(aDataSize>=4 && aDataSize<=8)
				a3 = *(TUint32*)aData; // only 4 more bytes, so pass by value, not pointer
			else
				a3 = (TUint32)aData;   // remainder passed as a pointer
			}
		__ACQUIRE_BTRACE_LOCK();
		TBool r = traceData.iHandler(a0,0,aContext,a1,a2,a3,0,aPc);
		__RELEASE_BTRACE_LOCK();
		return r;
		}

	// Multipart trace: emit First/Middle.../Last fragments sharing a traceId.
	// adjust for header2, extra, and size word...
	a0 |= BTrace::EHeader2Present<<(BTrace::EFlagsIndex*8)|BTrace::EExtraPresent<<(BTrace::EFlagsIndex*8);
	a0 += 12;

	TUint32 traceId = __e32_atomic_add_ord32(&BigTraceId, 1);
	TUint32 header2 = BTrace::EMultipartFirst;
	TInt offset = 0;
	do
		{
		// Fragment size: at most KMaxBTraceDataArray; a short final chunk
		// flips header2 to EMultipartLast.
		TUint32 size = aDataSize-offset;
		if(size>KMaxBTraceDataArray)
			size = KMaxBTraceDataArray;
		else
			header2 = BTrace::EMultipartLast;
		if(size<=4)
			*(TUint32*)&aData = *(TUint32*)aData; // 4 bytes or less are passed by value, not pointer

		__ACQUIRE_BTRACE_LOCK();
		TBool result = traceData.iHandler(a0+size,header2,aContext,aDataSize,a1,(TUint32)aData,traceId,aPc);
		__RELEASE_BTRACE_LOCK();
		if (!result)
			return result;	// handler rejected this part; abort the rest

		offset += size;
		*(TUint8**)&aData += size;	// advance the payload pointer

		header2 = BTrace::EMultipartMiddle;
		a1 = offset;	// subsequent parts carry the running offset in a1
		}
	while(offset<aDataSize);

	return TRUE;
	}

// Return the spinlock guarding BTrace output, or NULL when this build does
// not use a trace lock (__USE_BTRACE_LOCK__ undefined).
EXPORT_C TSpinLock* BTrace::LockPtr()
	{
#ifdef __USE_BTRACE_LOCK__
	return &BTraceLock;
#else
	return 0;
#endif
	}