// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\nkerns.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#include <e32cmn.h>
#include <e32cmn_private.h>
#include "nk_priv.h"

extern "C" void ExcFault(TAny*);

/******************************************************************************
 * Thread
 ******************************************************************************/

void InvalidExec()
    {
    FAULT();
    }

static const SFastExecTable DefaultFastExecTable={0,{0}};
static const SSlowExecTable DefaultSlowExecTable={0,(TLinAddr)InvalidExec,0,{{0,0}}};
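
// These are the fallback tables used by NThreadBase::Create() below for threads that
// supply no exec tables of their own: the fast table is empty and the slow table routes
// every call to InvalidExec(), which faults the system, so a stray executive call is
// caught immediately.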

const SNThreadHandlers NThread_Default_Handlers =
    {
    NTHREAD_DEFAULT_EXIT_HANDLER,
    NTHREAD_DEFAULT_STATE_HANDLER,
    NTHREAD_DEFAULT_EXCEPTION_HANDLER,
    NTHREAD_DEFAULT_TIMEOUT_HANDLER
    };

/** Create a fast mutex

    @publishedPartner
    @released
*/
EXPORT_C NFastMutex::NFastMutex()
    : iHoldingThread(0), iWaiting(0)
    {
    }
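
/*
Illustrative sketch (not part of the original source): a fast mutex constructed as above
starts out free (iHoldingThread==0, iWaiting==0).  Assuming the usual nanokernel entry
points NKern::FMWait() and NKern::FMSignal(), kernel-side code would guard a short
critical region as below; ExampleMutex, ExampleCounter and ExampleIncrement() are
hypothetical names used only for illustration.

    NFastMutex ExampleMutex;
    TInt ExampleCounter;

    void ExampleIncrement()
        {
        NKern::FMWait(&ExampleMutex);       // acquire; blocks while another thread holds it
        ++ExampleCounter;                   // the holder must not block or nest fast mutexes
        NKern::FMSignal(&ExampleMutex);     // release; wakes a waiter if iWaiting was set
        }
*/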

/** Create a spin lock

    @internalComponent
*/
EXPORT_C TSpinLock::TSpinLock(TUint)
    : iLock(0)
    {
    }

/** Create a R/W spin lock

    @internalComponent
*/
EXPORT_C TRWSpinLock::TRWSpinLock(TUint)
    : iLock(0)
    {
    }

NThreadBase::NThreadBase()
    {
    // from TPriListLink
    iPriority = 0;
    iSpare1 = 0;
    iSpare2 = 0;
    iSpare3 = 0;

    iRequestSemaphore.iOwningThread=(NThreadBase*)this;
    new (&iTimer) NTimer(TimerExpired,this);
    iRequestSemaphore.iOwningThread = this;

    iHeldFastMutex = 0;
    iWaitFastMutex = 0;
    iAddressSpace = 0;
    iTime = 0;
    iTimeslice = 0;
    iWaitObj = 0;
    iSuspendCount = 0;
    iCsCount = 0;
    iCsFunction = 0;
    iReturnValue = 0;
    iStackBase = 0;
    iStackSize = 0;
    iHandlers = 0;
    iFastExecTable = 0;
    iSlowExecTable = 0;
    iSavedSP = 0;
    iExtraContext = 0;
    iExtraContextSize = 0;
    iLastStartTime = 0;
    iTotalCpuTime = 0;
    iTag = 0;
    iVemsData = 0;
    iUserModeCallbacks = 0;
    iSpare7 = 0;
    iSpare8 = 0;
    }

TInt NThreadBase::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
    {
    if (aInfo.iPriority<0 || aInfo.iPriority>63)
        return KErrArgument;
    if (aInfo.iPriority==0 && !aInitial)
        return KErrArgument;
    new (this) NThreadBase;
    iStackBase=(TLinAddr)aInfo.iStackBase;
    iStackSize=aInfo.iStackSize;
    iTimeslice=(aInfo.iTimeslice>0)?aInfo.iTimeslice:-1;
    iTime=iTimeslice;
#ifdef _DEBUG
    // When the crazy scheduler is active, refuse to set any priority higher than 1
    if (KCrazySchedulerEnabled())
        iPriority=TUint8(Min(1,aInfo.iPriority));
    else
#endif
        {
        iPriority=TUint8(aInfo.iPriority);
        }
    iHandlers = aInfo.iHandlers ? aInfo.iHandlers : &NThread_Default_Handlers;
    iFastExecTable=aInfo.iFastExecTable?aInfo.iFastExecTable:&DefaultFastExecTable;
    iSlowExecTable=(aInfo.iSlowExecTable?aInfo.iSlowExecTable:&DefaultSlowExecTable)->iEntries;
    iSpare2=(TUint8)aInfo.iAttributes;      // iSpare2 is NThread attributes
    if (aInitial)
        {
        iNState=EReady;
        iSuspendCount=0;
        TheScheduler.Add(this);
        TheScheduler.iCurrentThread=this;
        TheScheduler.iKernCSLocked=0;       // now that current thread is defined
        }
    else
        {
        iNState=ESuspended;
        iSuspendCount=-1;
        }
    return KErrNone;
    }
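
// Note on the aInitial path above: the initial (null) thread is created while the kernel
// is still locked during boot, so Create() installs it directly as
// TheScheduler.iCurrentThread and then clears iKernCSLocked itself.  Every other thread
// starts out ESuspended with iSuspendCount==-1 and only becomes ready once it is resumed.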

void NThread_Default_State_Handler(NThread* __DEBUG_ONLY(aThread), TInt __DEBUG_ONLY(aOperation), TInt __DEBUG_ONLY(aParameter))
    {
    __KTRACE_OPT(KPANIC,DEBUGPRINT("Unknown NState %d: thread %T op %08x par %08x",aThread->iNState,aThread,aOperation,aParameter));
    FAULT();
    }

void NThread_Default_Exception_Handler(TAny* aContext, NThread*)
    {
    ExcFault(aContext);
    }


/** Create a nanothread.

    This function is intended to be used by the EPOC kernel and by personality
    layers. A nanothread may not use most of the functions available to normal
    Symbian OS threads. Use Kern::ThreadCreate() to create a Symbian OS thread.

    @param aThread Pointer to control block for thread to create.
    @param aInfo Information needed for creating the thread.

    @see SNThreadCreateInfo
    @see Kern::ThreadCreate

    @pre Call in a thread context.
    @pre Interrupts must be enabled.
    @pre Kernel must be unlocked.
*/
EXPORT_C TInt NKern::ThreadCreate(NThread* aThread, SNThreadCreateInfo& aInfo)
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadCreate");
    return aThread->Create(aInfo,FALSE);
    }
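
/*
Illustrative sketch (not part of the original source): how a caller might fill in
SNThreadCreateInfo before calling NKern::ThreadCreate().  Only fields that this file
itself reads (iFunction, iStackBase, iStackSize, iPriority, iTimeslice, iHandlers,
iFastExecTable, iSlowExecTable, iAttributes) are shown; the stack array, entry function
and chosen values are hypothetical.

    static TUint32 ExampleStack[1024];

    static void ExampleEntry(TAny*)
        {
        // thread body
        }

    TInt CreateExampleNanothread(NThread& aThread)
        {
        SNThreadCreateInfo info;
        memclr(&info, sizeof(info));        // unset fields fall back to the defaults above
        info.iFunction = ExampleEntry;
        info.iStackBase = ExampleStack;
        info.iStackSize = sizeof(ExampleStack);
        info.iPriority = 16;                // must be 1..63 for a non-initial thread
        info.iTimeslice = 20;               // nanokernel ticks; <=0 means not timesliced
        return NKern::ThreadCreate(&aThread, info);
        }
*/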

// User-mode callbacks

TUserModeCallback::TUserModeCallback(TUserModeCallbackFunc aFunc) :
    iNext(KUserModeCallbackUnqueued),
    iFunc(aFunc)
    {
    }

TUserModeCallback::~TUserModeCallback()
    {
    __NK_ASSERT_DEBUG(iNext == KUserModeCallbackUnqueued);
    }

TInt NKern::QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback)
    {
    if (aCallback->iNext != KUserModeCallbackUnqueued)
        return KErrInUse;
    TInt r = KErrDied;
    NKern::Lock();
    TUserModeCallback* listHead = aThread->iUserModeCallbacks;
    if (((TLinAddr)listHead & 3) == 0)
        {
        aCallback->iNext = listHead;
        aThread->iUserModeCallbacks = aCallback;
        r = KErrNone;
        }
    NKern::Unlock();
    return r;
    }
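
// Note on the low-bit test above: iUserModeCallbacks normally holds a word-aligned
// list-head pointer (low two bits clear).  NKern::MoveUserModeCallbacks() below closes a
// dying thread's queue by storing the odd sentinel (TUserModeCallback*)1, so a
// non-aligned value here means "no more callbacks may be queued" and the caller gets
// KErrDied.
//
// Illustrative sketch (not part of the original source): a callback pairs a
// TUserModeCallback object with a function invoked with the reason codes used in this
// file, EUserModeCallbackRun on normal dispatch and EUserModeCallbackCancel on teardown.
// The names below are hypothetical, and the reason type and parameter layout are
// assumed from how iFunc is invoked in this file.
//
//  void ExampleCallbackFunc(TUserModeCallback* aCb, TUserModeCallbackReason aReason)
//      {
//      if (aReason == EUserModeCallbackRun)
//          {
//          // normal dispatch in the target thread, just before it returns to user mode
//          }
//      else
//          {
//          // EUserModeCallbackCancel: thread is dying or queue torn down; clean up only
//          }
//      }
//
//  TUserModeCallback ExampleCallback(ExampleCallbackFunc);
//  TInt err = NKern::QueueUserModeCallback(aTargetThread, &ExampleCallback);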

// Called with interrupts disabled
// The vast majority of times this is called with zero or one callback pending
void NThreadBase::CallUserModeCallbacks()
    {
    while (iUserModeCallbacks != NULL)
        {
        // Remove first callback
        TUserModeCallback* callback = iUserModeCallbacks;
        iUserModeCallbacks = callback->iNext;

        // Enter critical section to ensure callback is called
        NKern::ThreadEnterCS();

        // Re-enable interrupts and call callback
        NKern::EnableAllInterrupts();
        callback->iNext = KUserModeCallbackUnqueued;
        callback->iFunc(callback, EUserModeCallbackRun);

        // Leave critical section: thread may die at this point
        NKern::ThreadLeaveCS();

        NKern::DisableAllInterrupts();
        }
    }

void NKern::CancelUserModeCallbacks()
    {
    // Call any queued callbacks with the EUserModeCallbackCancel reason code, in the current
    // thread.

    NThreadBase* thread = NCurrentThread();
    NKern::Lock();
    TUserModeCallback* listHead = thread->iUserModeCallbacks;
    thread->iUserModeCallbacks = NULL;
    NKern::Unlock();

    while (listHead != NULL)
        {
        TUserModeCallback* callback = listHead;
        listHead = listHead->iNext;
        callback->iNext = KUserModeCallbackUnqueued;
        callback->iFunc(callback, EUserModeCallbackCancel);
        }
    }

void NKern::MoveUserModeCallbacks(NThreadBase* aDestThread, NThreadBase* aSrcThread)
    {
    // Move all queued user-mode callbacks from the source thread to the destination thread, and
    // prevent any more from being queued.  Used by the kernel thread code so that callbacks get
    // cancelled in another thread if the thread they were originally queued on dies.

    NKern::Lock();
    TUserModeCallback* sourceListStart = aSrcThread->iUserModeCallbacks;
    aSrcThread->iUserModeCallbacks = (TUserModeCallback*)1;
    NKern::Unlock();
    __NK_ASSERT_DEBUG(((TUint)sourceListStart & 3) == 0);  // check this only gets called once per thread

    if (sourceListStart == NULL)
        return;

    TUserModeCallback* sourceListEnd = sourceListStart;
    while (sourceListEnd->iNext != NULL)
        sourceListEnd = sourceListEnd->iNext;

    NKern::Lock();
    TUserModeCallback* destListStart = aDestThread->iUserModeCallbacks;
    __NK_ASSERT_DEBUG(((TUint)destListStart & 3) == 0);
    sourceListEnd->iNext = destListStart;
    aDestThread->iUserModeCallbacks = sourceListStart;
    NKern::Unlock();
    }
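
// The (TUserModeCallback*)1 stored above is the "queue closed" sentinel tested by the
// low-bit check in NKern::QueueUserModeCallback(); once a thread's callbacks have been
// moved away, any further attempt to queue to that thread fails with KErrDied.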

/** Initialise the null thread
    @internalComponent
*/
void NKern::Init(NThread* aThread, SNThreadCreateInfo& aInfo)
    {
    aInfo.iFunction=NULL;           // irrelevant
    aInfo.iPriority=0;              // null thread has lowest priority
    aInfo.iTimeslice=0;             // null thread not timesliced
    aInfo.iAttributes=0;            // null thread does not require implicit locks
    aThread->Create(aInfo,TRUE);    // create the null thread
    }

extern "C" {
TUint32 CrashState;
}

EXPORT_C TBool NKern::Crashed()
    {
    return CrashState!=0;
    }


/** @internalTechnology */
EXPORT_C void NKern::RecordIntLatency(TInt /*aLatency*/, TInt /*aIntMask*/)
    {
    }


/** @internalTechnology */
EXPORT_C void NKern::RecordThreadLatency(TInt /*aLatency*/)
    {
    }

/********************************************
 * Deterministic Priority List Implementation
 ********************************************/


/** Construct a priority list with the specified number of priorities

    @param aNumPriorities The number of priorities (must be 1-64).
*/
EXPORT_C TPriListBase::TPriListBase(TInt aNumPriorities)
    {
    memclr(this, sizeof(TPriListBase)+(aNumPriorities-1)*sizeof(SDblQueLink*) );
    }
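
// Note on the memclr() size above: TPriListBase itself declares storage for a single
// queue-head pointer, and the derived priority-list template places the remaining
// aNumPriorities-1 pointers directly after it, so clearing
// sizeof(TPriListBase)+(aNumPriorities-1)*sizeof(SDblQueLink*) zeroes the present-bit
// mask and every queue head in one pass.  (The layout is assumed from this expression
// rather than restated from the class declaration.)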


/********************************************
 * Miscellaneous
 ********************************************/


/** Returns number of nanokernel timer ticks since system started.
    @return tick count
    @pre any context
*/
EXPORT_C TUint32 NKern::TickCount()
    {
    return NTickCount();
    }


TUint32 BTrace::BigTraceId = 0;
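
// DoOutBig() below emits a "big" trace record.  As implemented here: payloads of up to
// KMaxBTraceDataArray+4 bytes go out as a single record, with the first four bytes of
// data passed in a2 and the remainder passed either by value (if only four more bytes)
// or by pointer; anything larger is split into a sequence of EMultipartFirst /
// EMultipartMiddle / EMultipartLast records that share a trace identifier drawn from
// BigTraceId, with a1 carrying the running offset for the continuation records.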

TBool BTrace::DoOutBig(TUint32 a0, TUint32 a1, const TAny* aData, TInt aDataSize, TUint32 aContext, TUint32 aPc)
    {
    SBTraceData& traceData = BTraceData;

    // see if trace is small enough to fit in single record...
    if(TUint(aDataSize)<=TUint(KMaxBTraceDataArray+4))
        {
        a0 += aDataSize;
        TUint32 a2 = 0;
        TUint32 a3 = 0;
        if(aDataSize)
            {
            a2 = *((TUint32*&)aData)++; // first 4 bytes into a2
            if(aDataSize>=4 && aDataSize<=8)
                a3 = *(TUint32*)aData; // only 4 more bytes, so pass by value, not pointer
            else
                a3 = (TUint32)aData;
            }
        return traceData.iHandler(a0,0,aContext,a1,a2,a3,0,aPc);
        }

    // adjust for header2, extra, and size word...
    a0 |= BTrace::EHeader2Present<<(BTrace::EFlagsIndex*8)|BTrace::EExtraPresent<<(BTrace::EFlagsIndex*8);
    a0 += 12;

    TUint32 traceId = __e32_atomic_add_ord32(&BigTraceId, 1);
    TUint32 header2 = BTrace::EMultipartFirst;
    TInt offset = 0;
    do
        {
        TUint32 size = aDataSize-offset;
        if(size>KMaxBTraceDataArray)
            size = KMaxBTraceDataArray;
        else
            header2 = BTrace::EMultipartLast;
        if(size<=4)
            *(TUint32*)&aData = *(TUint32*)aData; // 4 bytes or less are passed by value, not pointer

        TBool result = traceData.iHandler(a0+size,header2,aContext,aDataSize,a1,(TUint32)aData,traceId,aPc);
        if(!result)
            return result;

        offset += size;
        *(TUint8**)&aData += size;

        header2 = BTrace::EMultipartMiddle;
        a1 = offset;
        }
    while(offset<aDataSize);

    return TRUE;
    }

EXPORT_C TSpinLock* BTrace::LockPtr()
    {
    return 0;
    }