// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\kernel\sthread.cpp
//
//

#include <kernel/kern_priv.h>
#include "execs.h"
#include <kernel/emi.h>

#define iMState iWaitLink.iSpare1 // Allow a sensible name to be used for iMState
#define iExiting iWaitLink.iSpare2 // Allow a sensible name to be used for iExiting
#define iWaitListReserved iWaitLink.iSpare3 // Allow a sensible name to be used for iWaitListReserved
#define __CHECK_PRIORITIES(t) __ASSERT_DEBUG((t)->iWaitLink.iPriority==(t)->iNThread.i_NThread_BasePri,Kern::Fault("WaitLinkPriT",__LINE__))

_LIT(KLitKill,"Kill");
_LIT(KLitTerminate,"Terminate");

extern const SNThreadHandlers EpocThreadHandlers =
    {
    &DThread::EpocThreadExitHandler,
    NTHREAD_DEFAULT_STATE_HANDLER,
    &Exc::Dispatch,
    &DThread::EpocThreadTimeoutHandler
    };

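// A note on the handler table above: EpocThreadHandlers is the EPOC layer's hook
// table for the nanokernel - the exit and timeout handlers defined in this file,
// the default nanothread state handler, and Exc::Dispatch for exceptions.
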
/********************************************
 * Thread cleanup entries
 ********************************************/
EXPORT_C TThreadCleanup::TThreadCleanup()
    : iThread(NULL)
    {
    }

// Enter and return with system locked.
void TThreadCleanup::ChangePriority(TInt aNewPriority)
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("TThreadCleanup::ChangePriority %d to %d, thread %O nest %d",iPriority,aNewPriority,iThread,K::PINestLevel));
    if (aNewPriority != iPriority)
        {
        if (iThread)
            {
            iThread->iCleanupQ.ChangePriority(this,aNewPriority);
            if (++K::PINestLevel<KMaxPriorityInheritanceNesting)
                iThread->SetRequiredPriority();
            }
        else
            iPriority=TUint8(aNewPriority);
        }
    }

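// Note: ChangePriority() above only propagates the new priority to the owning
// thread while K::PINestLevel stays below KMaxPriorityInheritanceNesting, which
// bounds how far a single change can cascade through chains of priority
// inheritance.
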
// Enter and return with system locked.
/**
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre System must be locked
@pre Call in a thread context.
*/
EXPORT_C void TThreadCleanup::Remove()
    {
    CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED|MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
        "TThreadCleanup::Remove");
//  __KTRACE_OPT(KTHREAD,Kern::Printf("TThreadCleanup::Remove priority %d, thread %O",iPriority,iThread));
    iThread->iCleanupQ.Remove(this);
    if (iPriority>=iThread->iNThread.i_NThread_BasePri) // can't affect priority if lower than current
        iThread->SetRequiredPriority();
    }

/********************************************
 * Thread
 ********************************************/

#ifdef KTHREAD
void DThread::DefaultUnknownStateHandler(DThread* aThread, TInt anOperation, TInt aParameter)
#else
void DThread::DefaultUnknownStateHandler(DThread* aThread, TInt, TInt)
#endif
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("UNKNOWN THREAD MSTATE!! thread %O Mstate=%d, operation=%d, parameter=%d",
                                        aThread,aThread->iMState,anOperation,aParameter));
    Kern::Fault("THREAD STATE",aThread->iMState);
    }

DThread::DThread()
    : iTls(KTlsArrayGranularity,_FOFF(STls,iHandle)),
        iDebugMask(0xffffffff), iKillDfc(NULL,this,K::SvMsgQ,2),
        iUnknownStateHandler(DefaultUnknownStateHandler),
        iExitType((TUint8)EExitPending)
#ifdef __SMP__
        , iSMPSafeCallback(SMPSafeCallback)
#endif
    {
    iKernMsg.iSem.iOwningThread=&iNThread;
    iMState=ECreated;
    iExiting=EFalse;
    }

// Enter and return with system unlocked.
void DThread::Destruct()
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("DThread::Destruct %O",this));
    __NK_ASSERT_DEBUG(((TUint)iNThread.iUserModeCallbacks & ~3) == NULL);
    iTls.Close();
    if (iSyncMsgPtr)
        {
        __KTRACE_OPT(KSERVER,Kern::Printf("DThread::Destruct(%08X) freeing sync message at %08X", this, iSyncMsgPtr));
        iSyncMsgPtr->ReleaseMessagePool(RMessageK::ESync, 1);
        iSyncMsgPtr = NULL;
        }
    FreeSupervisorStack();
    iHandles.Close(iOwningProcess);
    Kern::SafeClose(iExtTempObj,NULL);
    if (iNThread.iExtraContextSize > 0)
        Kern::Free(iNThread.iExtraContext);
    }

// Enter and return with system unlocked.
void DThread::Release()
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O Release()",this));
    if (iWaitListReserved)
        {
        iWaitListReserved = 0;
        TThreadWaitList::ThreadDestroyed();
        }
    iTls.Close();
    __KTRACE_OPT(KTHREAD,Kern::Printf("Close temporary object %O",iTempObj));
    Kern::SafeClose(iTempObj,NULL);
    __KTRACE_OPT(KTHREAD,Kern::Printf("Decrement temporary message %08x",iTempMsg));
    if (iTempMsg)
        {
        NKern::LockSystem();
        iTempMsg->CloseRef();
        NKern::UnlockSystem();
        }
    iTempMsg=NULL;
    __KTRACE_OPT(KTHREAD,Kern::Printf("Free temporary allocation %08x",iTempAlloc));
    Kern::Free(iTempAlloc);
    iTempAlloc=NULL;

    __KTRACE_OPT(KTHREAD,Kern::Printf("Removing closing libraries"));
    if (!iClosingLibs.IsEmpty())
        RemoveClosingLibs();

    __KTRACE_OPT(KTHREAD, Kern::Printf("Unmapping code segs delayed by User::Leave() (if any)"));
    if (iLeaveDepth)
        CleanupLeave(iLeaveDepth);

    __KTRACE_OPT(KTHREAD,Kern::Printf("Deleting handles..."));
    iHandles.Close(iOwningProcess);
    __KTRACE_OPT(KTHREAD,Kern::Printf("Deleted handles"));

    // deallocate the user stack if any
    if (iThreadType==EThreadUser)
        FreeUserStack();

    // cancel the timer
    __KTRACE_OPT(KTHREAD,Kern::Printf("Cancelling timer"));
    iTimer.Cancel(NULL);

    __KTRACE_OPT(KTHREAD,Kern::Printf("Freeing supervisor-mode stack"));
    FreeSupervisorStack();
#ifdef BTRACE_THREAD_IDENTIFICATION
    BTrace12(BTrace::EThreadIdentification,BTrace::EThreadDestroy,&iNThread,iOwningProcess,iId);
#endif
    __DEBUG_EVENT(EEventRemoveThread, this);
    __KTRACE_OPT(KTHREAD,Kern::Printf("Closing thread"));
    Close(iOwningProcess); // close the thread
    }

// Enter and return with system locked.
/**
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre System must be locked
@pre Call in a thread context.
*/
EXPORT_C void DThread::AddCleanup(TThreadCleanup* aCleanup)
    {
    CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED|MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
        "DThread::AddCleanup");
    aCleanup->iThread=this;
    iCleanupQ.Add(aCleanup);
    if (aCleanup->iPriority>iNThread.i_NThread_BasePri) // can't affect priority unless it's higher than current
        SetActualPriority(aCleanup->iPriority); // this cleanup item must be the highest priority
    }

// Enter and return with system locked.
void DThread::Suspend(TInt aCount)
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("Suspend thread %O, count %d",this,aCount));

    TBool r=NKern::ThreadSuspend(&iNThread,aCount);
    if (!r)
        return; // suspend was deferred (always so if self-suspend)

    // suspend has taken effect - fix up wait queues
    K::PINestLevel=0;
    switch (iMState)
        {
        case ECreated:
        case EDead:
        case EReady:
        case EWaitSemaphoreSuspended:
        case EWaitMutexSuspended:
        case EWaitCondVarSuspended:
            break;
        case EWaitSemaphore:
            ((DSemaphore*)iWaitObj)->SuspendWaitingThread(this);
            break;
        case EWaitMutex:
            ((DMutex*)iWaitObj)->SuspendWaitingThread(this);
            break;
        case EHoldMutexPending:
            ((DMutex*)iWaitObj)->SuspendPendingThread(this);
            break;
        case EWaitCondVar:
            ((DCondVar*)iWaitObj)->SuspendWaitingThread(this);
            break;
        default:
            UnknownState(ESuspend,0);
            break;
        }
    }

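// Note: NKern::ThreadSuspend() in Suspend() above returns false when the
// suspension is deferred (always the case for self-suspension); the wait-queue
// fix-up only runs once the suspension has actually taken effect. Resume() and
// ForceResume() below mirror that contract for the resume path.
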
// Enter and return with system locked.
void DThread::Resume()
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("Resume thread %O",this));

    TBool r=NKern::ThreadResume(&iNThread);
    if (!r)
        return; // just cancelled deferred suspends

    // resume has taken effect - fix up wait queues
    K::PINestLevel=0;
    switch (iMState)
        {
        case ECreated:
        case EDead:
        case EReady:
        case EWaitSemaphore:
        case EWaitMutex:
        case EHoldMutexPending:
        case EWaitCondVar:
            break;
        case EWaitSemaphoreSuspended:
            ((DSemaphore*)iWaitObj)->ResumeWaitingThread(this);
            break;
        case EWaitMutexSuspended:
            ((DMutex*)iWaitObj)->ResumeWaitingThread(this);
            break;
        case EWaitCondVarSuspended:
            ((DCondVar*)iWaitObj)->ResumeWaitingThread(this);
            break;
        default:
            UnknownState(EResume,0);
            break;
        }
    }

void DThread::ForceResume()
//
// Resume the thread regardless of nested suspensions
// Enter and return with system locked.
//
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("ForceResume thread %O",this));

    TBool r=NKern::ThreadForceResume(&iNThread);
    if (!r)
        return; // just cancelled deferred suspends

    // resume has taken effect - fix up wait queues
    K::PINestLevel=0;
    switch (iMState)
        {
        case ECreated:
        case EDead:
        case EReady:
        case EWaitSemaphore:
        case EWaitMutex:
        case EHoldMutexPending:
        case EWaitCondVar:
            break;
        case EWaitSemaphoreSuspended:
            ((DSemaphore*)iWaitObj)->ResumeWaitingThread(this);
            break;
        case EWaitMutexSuspended:
            ((DMutex*)iWaitObj)->ResumeWaitingThread(this);
            break;
        case EWaitCondVarSuspended:
            ((DCondVar*)iWaitObj)->ResumeWaitingThread(this);
            break;
        default:
            UnknownState(EResume,0);
            break;
        }
    }

// Cancel the thread's timer
// Enter and return with system locked.
EXPORT_C void DThread::CancelTimer()
    {
    iTimer.Cancel(NULL);
    iTimer.iState = (TUint8)TTimer::EIdle;
    }

// Release the thread's wait on any sync object (not explicit suspend)
// Enter and return with system locked.
/**
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre System must be locked
@pre Call in a thread context.
*/
EXPORT_C TInt DThread::ReleaseWait(TInt aReturnCode)
    {
    CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED|MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
        "DThread::ReleaseWait");
    __KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O ReleaseWait() retcode=%d",this,aReturnCode));
    switch(iMState)
        {
        case ECreated:
        case EDead:
        case EReady:
            return KErrGeneral;
        case EWaitSemaphore:
            ((DSemaphore*)iWaitObj)->WaitCancel(this);
            break;
        case EWaitSemaphoreSuspended:
            ((DSemaphore*)iWaitObj)->WaitCancelSuspended(this);
            break;
        case EWaitMutex:
            ((DMutex*)iWaitObj)->WaitCancel(this);
            break;
        case EWaitMutexSuspended:
            ((DMutex*)iWaitObj)->WaitCancelSuspended(this);
            break;
        case EHoldMutexPending:
            ((DMutex*)iWaitObj)->RemovePendingThread(this);
            break;
        case EWaitCondVar:
            ((DCondVar*)iWaitObj)->WaitCancel(this);
            break;
        case EWaitCondVarSuspended:
            ((DCondVar*)iWaitObj)->WaitCancelSuspended(this);
            break;
        default:
            UnknownState(EReleaseWait,aReturnCode);
            break;
        }
    iWaitObj=NULL;
    iMState=EReady;
    NKern::ThreadRelease(&iNThread,aReturnCode);
    return KErrNone;
    }

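// ReleaseWait() above is the hub of the M-state machine: each waiting state is
// mapped to the matching cancel operation on the object being waited on
// (DSemaphore, DMutex or DCondVar), after which the thread is marked EReady and
// released in the nanokernel with the supplied return code.
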
void DThread::EpocThreadTimeoutHandler(NThread* aThread, TInt aOp)
    {
    if (aOp == NThreadBase::ETimeoutPreamble)
        {
        NKern::LockSystem();
        return;
        }
    if (aOp == NThreadBase::ETimeoutPostamble)
        {
        DThread* pT = _LOFF(aThread, DThread, iNThread);
        pT->ReleaseWait(KErrTimedOut);
        }
    NKern::UnlockSystem();
    }

#ifdef SYMBIAN_CURB_SYSTEMSERVER_PRIORITIES
const TUint8 KPrioritySystemServerMore = 23;
#else
const TUint8 KPrioritySystemServerMore = 24;
#endif // SYMBIAN_CURB_SYSTEMSERVER_PRIORITIES
const TInt KMaxPriorityWithoutProtServ = KPrioritySystemServerMore;

// Mapping table for thread+process priority to thread absolute priority
LOCAL_D const TUint8 ThreadPriorityTable[64] =
    {
    //                 Idle MuchLess Less Normal More MuchMore RealTime
    /*Low*/               1,  1,  2,  3,  4,  5, 22, 0,
    /*Background*/        3,  5,  6,  7,  8,  9, 22, 0,
    /*Foreground*/        3, 10, 11, 12, 13, 14, 22, 0,
    /*High*/              3, 17, 18, 19, 20, 22, 23, 0,
    /*SystemServer1*/     9, 15, 16, 21, KPrioritySystemServerMore, 25, 28, 0,
    /*SystemServer2*/     9, 15, 16, 21, KPrioritySystemServerMore, 25, 28, 0,
    /*SystemServer3*/     9, 15, 16, 21, KPrioritySystemServerMore, 25, 28, 0,
    /*RealTimeServer*/   18, 26, 27, 28, 29, 30, 31, 0
    };

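// Illustrative lookup (a sketch; the enum values themselves live in the kernel
// headers): CalcDefaultThreadPriority() below biases a relative thread priority
// by 8 to get the column (0..7) and uses the process priority (0..7) as the row,
// so a Normal-priority thread in a Foreground-priority process gives
//
//     TInt i = (2<<3) + 3;                // == 19
//     TInt r = ThreadPriorityTable[i];    // == 12
//
// i.e. that thread runs at absolute priority 12.
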
TInt DThread::CalcDefaultThreadPriority()
    {
    TInt r;
    TInt tp=iThreadPriority;
    if (tp>=0)
        r=(tp<KNumPriorities)?tp:KNumPriorities-1; // absolute thread priorities
    else
        {
        tp+=8;
        if (tp<0)
            tp=0;
        TInt pp=iOwningProcess->iPriority; // process priority in range 0 to 7
        TInt i=(pp<<3)+tp;
        r=ThreadPriorityTable[i]; // map thread+process priority to actual priority
        if ((r > KMaxPriorityWithoutProtServ) &&
            !(this->HasCapability(ECapabilityProtServ,__PLATSEC_DIAGNOSTIC_STRING("Warning: thread priority capped to below realtime range"))))
            r = KMaxPriorityWithoutProtServ;
        }
    __KTRACE_OPT(KTHREAD,Kern::Printf("CalcDefaultThreadPriority tp %d pp %d abs %d",iThreadPriority,iOwningProcess->iPriority,r));
    return r;
    }

// Enter and return with system locked.
/**
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre System must be locked
@pre Call in a thread context.
*/
EXPORT_C void DThread::SetThreadPriority(TInt aThreadPriority)
    {
    CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED|MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
        "DThread::SetThreadPriority");
    __KTRACE_OPT(KTHREAD,Kern::Printf("DThread %O SetThreadPriority %d",this,aThreadPriority));
    iThreadPriority=aThreadPriority;
    TInt def=CalcDefaultThreadPriority();
    SetDefaultPriority(def);
    }

// Enter and return with system locked.
void DThread::SetDefaultPriority(TInt aDefaultPriority)
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O SetDefaultPriority %d",this,aDefaultPriority));
#ifdef BTRACE_THREAD_PRIORITY
    BTrace12(BTrace::EThreadPriority,BTrace::EDThreadPriority,&this->iNThread,iThreadPriority,aDefaultPriority);
#endif
#if defined(_DEBUG) && !defined(__SMP__)
    // Cap priorities at 1 if we're doing crazy scheduling
    if (TheSuperPage().KernelConfigFlags() & EKernelConfigCrazyScheduling && aDefaultPriority > 1)
        aDefaultPriority = 1;
#endif
    iDefaultPriority=aDefaultPriority;
    K::PINestLevel=0;
    SetRequiredPriority();
    }

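// The "required" priority computed below is simply the larger of the thread's
// default priority and the highest priority of its cleanup entries (cleanup
// entries are how e.g. held mutexes feed priority inheritance into the thread).
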
// Enter and return with system locked.
void DThread::SetRequiredPriority()
    {
    TInt p=iDefaultPriority;
    TInt c=iCleanupQ.HighestPriority();
    __KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O SetRequiredPriority def %d cleanup %d nest %d",
                                        this,p,c,K::PINestLevel));
    if (c>p)
        p=c;
    if (p!=iNThread.i_NThread_BasePri)
        SetActualPriority(p);
    }

// Enter and return with system locked.
void DThread::SetActualPriority(TInt anActualPriority)
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O MState %d SetActualPriority %d",this,iMState,anActualPriority));
    TInt newp=anActualPriority;
    __ASSERT_DEBUG(newp>=0 && newp<KNumPriorities, K::Fault(K::EBadThreadPriority));
    NKern::ThreadSetPriority(&iNThread,newp);
    switch(iMState)
        {
        case ECreated:
        case EWaitSemaphoreSuspended:
        case EWaitMutexSuspended:
        case EWaitCondVarSuspended:
        case EReady:
        case EDead:
            iWaitLink.iPriority=TUint8(newp);
            break;
        case EWaitSemaphore:
            ((DSemaphore*)iWaitObj)->ChangeWaitingThreadPriority(this,newp);
            break;
        case EWaitMutex:
            ((DMutex*)iWaitObj)->ChangeWaitingThreadPriority(this,newp);
            break;
        case EHoldMutexPending:
            ((DMutex*)iWaitObj)->ChangePendingThreadPriority(this,newp);
            break;
        case EWaitCondVar:
            ((DCondVar*)iWaitObj)->ChangeWaitingThreadPriority(this,newp);
            break;
        default:
            UnknownState(EChangePriority,newp);
            break;
        }
    __CHECK_PRIORITIES(this);
    }

// Enter with system locked, return with system unlocked.
// Assumes thread MState is READY.
void DThread::RevertPriority()
    {
    TInt p=iDefaultPriority;
    TInt c=iCleanupQ.HighestPriority();
    __KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O RevertPriority def %d cleanup %d",
                                        this,iDefaultPriority,c));
    if (c>p)
        p=c;
    if (p!=iNThread.i_NThread_BasePri)
        {
        iWaitLink.iPriority=TUint8(p);
        NKern::ThreadSetPriority(&iNThread,p,SYSTEM_LOCK); // anti-thrash
        }
    else
        NKern::UnlockSystem();
    }

// Enter and return with system unlocked.
TInt DThread::DoCreate(SThreadCreateInfo& aInfo)
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O DoCreate: Function %08x Ptr %08x",this,aInfo.iFunction,aInfo.iPtr));
    __KTRACE_OPT(KTHREAD,Kern::Printf("type %d, sup stack=%08x, sup stack size=%x",aInfo.iType,aInfo.iSupervisorStack,aInfo.iSupervisorStackSize));
    __KTRACE_OPT(KTHREAD,Kern::Printf("user stack=%08x size %x, init priority=%d",aInfo.iUserStack,aInfo.iUserStackSize,aInfo.iInitialThreadPriority));
    iSupervisorStackSize=aInfo.iSupervisorStackSize;
    if (iSupervisorStackSize==0)
        iSupervisorStackSize=K::SupervisorThreadStackSize;
    if (iThreadType==EThreadInitial || iThreadType==EThreadAPInitial || iThreadType==EThreadMinimalSupervisor)
        iSupervisorStack=aInfo.iSupervisorStack;
    TInt r=KErrNone;
    if (!iSupervisorStack)
        r=AllocateSupervisorStack();
    if (r!=KErrNone)
        return r;
    iSyncMsgPtr = RMessageK::ClaimMessagePool(RMessageK::ESync, 1, NULL);
    __KTRACE_OPT(KSERVER,Kern::Printf("DThread::DoCreate(%08X) claimed sync message at %08X", this, iSyncMsgPtr));
    if (K::MsgInfo.iChunk && !iSyncMsgPtr)
        return KErrNoMemory;
    SNThreadCreateInfo ni;
    ni.iFunction=EpocThreadFunction;
    ni.iPriority=1; // Overridden by nkern for initial thread(s)
#if defined(_DEBUG) && !defined(__SMP__)
    // When doing crazy scheduling, all threads get just 1-tick timeslices
    if (TheSuperPage().KernelConfigFlags() & EKernelConfigCrazyScheduling)
        ni.iTimeslice = (iThreadType==EThreadMinimalSupervisor || iThreadType==EThreadAPInitial || iThreadType==EThreadInitial) ? -1 : 1;
    else
#endif
        {
        ni.iTimeslice = ( iThreadType==EThreadMinimalSupervisor
                        || iThreadType==EThreadAPInitial
                        || iThreadType==EThreadInitial
                        ) ? -1
                          : NKern::TimesliceTicks(EDefaultUserTimeSliceMs*1000);
        }
    ni.iAttributes=0; // overridden if necessary by memory model
    ni.iHandlers = &EpocThreadHandlers;
    ni.iFastExecTable=(const SFastExecTable*)EpocFastExecTable;
    ni.iSlowExecTable=(const SSlowExecTable*)EpocSlowExecTable;
    ni.iParameterBlock=(const TUint32*)&aInfo;
    ni.iParameterBlockSize=aInfo.iTotalSize;
    ni.iStackBase=iSupervisorStack;
    ni.iStackSize=iSupervisorStackSize;
#ifdef __SMP__
    TUint32 config = TheSuperPage().KernelConfigFlags();
    if (iThreadType==EThreadUser)
        {
        // user thread
        if ((config & EKernelConfigSMPUnsafeCPU0) && iOwningProcess->iSMPUnsafeCount)
            {
            ni.iCpuAffinity = 0; // compatibility mode
            ni.iGroup = 0;
            }
        else
            {
            ni.iCpuAffinity = KCpuAffinityAny;
            if ((config & EKernelConfigSMPUnsafeCompat) && iOwningProcess->iSMPUnsafeCount)
                ni.iGroup = iOwningProcess->iSMPUnsafeGroup;
            else
                ni.iGroup = 0;
            }

        }
    else
        {
        // kernel thread
        ni.iCpuAffinity = 0;
        ni.iGroup = 0;
        }
#endif
    if (iThreadType!=EThreadInitial)
        {
        if (iSupervisorStack)
            memset(iSupervisorStack,0xee,iSupervisorStackSize);
        if (iThreadType != EThreadAPInitial)
            {
            TAny* ec = iNThread.iExtraContext;
            TInt ecs = iNThread.iExtraContextSize;
            r=NKern::ThreadCreate(&iNThread,ni);
            if (r!=KErrNone)
                return r;
            iNThread.SetExtraContext(ec, ecs);
            NKern::LockSystem();
            SetThreadPriority(aInfo.iInitialThreadPriority);
            NKern::UnlockSystem();
            }
        }
    else
        iMState=EReady;
    r=SetupContext(aInfo);
    if (r==KErrNone)
        r=iTimer.Create();
    return r;
    }

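// A note on the __SMP__ affinity choices in DoCreate() above: when
// EKernelConfigSMPUnsafeCPU0 is set, threads of SMP-unsafe processes are pinned
// to CPU 0 for compatibility; with EKernelConfigSMPUnsafeCompat they instead
// join the process's scheduling group (iSMPUnsafeGroup). Kernel threads are
// created bound to CPU 0.
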
void svThreadKill(TAny* aPtr)
    {
    ((DThread*)aPtr)->SvKill();
    }

// Enter and return with system unlocked.
TDfc* DThread::EpocThreadExitHandler(NThread* aThread)
    {
    DThread* pT=_LOFF(aThread,DThread,iNThread);
    pT->Exit();
    pT->iKillDfc.SetFunction(svThreadKill);
    return &pT->iKillDfc; // NKERN will queue this before terminating this thread
    }

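// Thread exit is a two-stage affair: Exit() below runs in the context of the
// dying thread itself, after which the nanokernel queues iKillDfc (set up in
// EpocThreadExitHandler above) so that SvKill() can finish the cleanup in the
// kernel server's context.
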
void DThread::Exit()
//
// This function runs in the context of the exiting thread
// Enter and leave with system unlocked
//
    {
#ifdef KPANIC
    if (iExitType==EExitPanic)
        {
        __KTRACE_OPT2(KPANIC,KSCHED,Kern::Printf("Thread %O Panic %lS %d",this,&iExitCategory,iExitReason));
        }
    else if (iExitType==EExitTerminate)
        {
        __KTRACE_OPT2(KPANIC,KSCHED,Kern::Printf("Thread %O Terminated %d",this,iExitReason));
        }
    else if (iExitType==EExitKill && iExitReason!=KErrNone)
        {
        __KTRACE_OPT2(KPANIC,KSCHED,Kern::Printf("Thread %O Killed %d",this,iExitReason));
        }
#endif
    if (iExitType!=EExitKill && (iFlags & (KThreadFlagSystemPermanent|KThreadFlagSystemCritical)))
        K::Fault(K::ESystemThreadPanic);
    if (iFlags & KThreadFlagSystemPermanent)
        K::Fault(K::EPermanentThreadExit);
    if (this==K::EventThread)
        K::Fault(K::EThrdEventHookDied);

    // call crash debugger after application panic if KALLTHREADSSYSTEM bit is set
    if (iExitType==EExitPanic && KDebugNum(KALLTHREADSSYSTEM))
        K::Fault(K::ESystemThreadPanic);

#if defined(_DEBUG) && !defined(__SMP__)
    // Delay cleanup if we're using the delayed scheduler
    if (KDebugNum(KCRAZYSCHEDDELAY))
        NKern::Sleep(1);
#endif

    // Clear any paging exc trap as if exiting this thread causes any page faults
    // the thread must not resume execution from the trap handler.
    iPagingExcTrap = 0;

    NKern::LockSystem();
    ReleaseWait(KErrDied);
    iExiting=ETrue; // used to make logons complete early
    NKern::UnlockSystem();

    // regularize the thread state before main exit processing
    DoExit1();

#ifdef __EMI_SUPPORT__
    EMI::CallExitHandler(this);
#endif

    // Hook into debugger if any. Not conditioned by __DEBUGGER_SUPPORT__ so
    // minimal post-mortem debugging is possible even without full debugger support.
    DKernelEventHandler::Dispatch(EEventKillThread, this, NULL);

    TBool kill_process = (iFlags&KThreadFlagProcessPermanent) || ((iFlags&KThreadFlagProcessCritical) && iExitType!=EExitKill);
    DProcess* pP=iOwningProcess;
    if (kill_process)
        pP->Die((TExitType)iExitType,iExitReason,iExitCategory);

    __KTRACE_OPT(KTHREAD,Kern::Printf("Cancelling kernel message"));
    iKernMsg.Cancel();

    NKern::LockSystem();

    if (iIpcCount)
        {
        iIpcCount |= 0x80000000u;
        Open();
        }

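    // Note: the top bit set in iIpcCount above appears to flag the exit to
    // in-progress IPC, and the extra reference taken by Open() keeps this
    // DThread alive until the outstanding IPC operations have drained.
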
    // clean up - release held mutexes etc.
    while(iCleanupQ.NonEmpty())
        {
        K::PINestLevel=0;
        TThreadCleanup* pCln=iCleanupQ.First();
        pCln->Cleanup(); // this also removes the cleanup entry
        NKern::FlashSystem();
        }
    NKern::UnlockSystem();

    // complete target logons
    TLogon::CompleteAll(iTargetLogons, TLogon::ETarget, iExitReason);

    // remove any outstanding owned logons
    TLogon::CompleteAll(iOwnedLogons, TLogon::EOwned, KErrNone);

    // remove any outstanding miscellaneous notifiers
    KillMiscNotifiers();

    // close the heap originally used by this thread
    CloseCreatedHeap();

    // release any CPU resources (eg coprocessors)
    DoExit2();

//#ifdef KPANIC
#if 0
    extern void DumpMemory(const char* aTitle, TLinAddr aStart, TLinAddr aSize);

    if (KDebugNum(KPANIC) && (iExitType!=EExitKill || iExitReason!=KErrNone))
        {
        Kern::Printf("Thread %O iSavedSP=%08x", this, iNThread.iSavedSP);
        DumpMemory("Supervisor Stack", TLinAddr(iSupervisorStack), iSupervisorStackSize);
        if (iUserStackRunAddress)
            {
            DumpMemory("User Stack", iUserStackRunAddress, iUserStackSize);
            }
        }
#endif

    // stop the M-state machine
    NKern::LockSystem();
    iMState=EDead;
    NKern::UnlockSystem();
    }

void DThread::DoExit1()
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O DoExit1",this));
    }

/** Terminates the execution of a thread

It terminates the specified thread and sets its exit information.
This is an asynchronous operation.

This method can only be used by a thread to kill itself, or to kill
a user thread (iThreadType==EThreadUser). An attempt to kill a non-user
thread which is not the currently running thread will cause the system to fault
with KERN 94 (ENonUserThreadKilled).

@pre System Locked
@post System Un-locked
*/
void DThread::Die(TExitType aType, TInt aReason, const TDesC& aCategory)
    {
    CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"DThread::Die");
    __KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O Die: %d,%d,%lS",this,aType,aReason,&aCategory));
    SetExitInfo(aType,aReason,aCategory);

    // If necessary, decrement count of running user threads in this process. We get here if the
    // thread exited without calling User::Exit (eg panic, termination).
    if (iUserThreadState == EUserThreadCreated || iUserThreadState == EUserThreadRunning)
        {
        __e32_atomic_tas_ord32(&iOwningProcess->iUserThreadsRunning, 1, -1, 0);
        iUserThreadState = EUserThreadExiting;
        }

    if (this==TheCurrentThread)
        {
        SetDefaultPriority(KDefaultExitPriority);
        NKern::ThreadKill(&iNThread,SYSTEM_LOCK); // this will not return
        K::Fault(K::EDeadThreadRunning);
        }
    if(iThreadType!=EThreadUser)
        K::Fault(K::ENonUserThreadKilled);
    NKern::ThreadKill(&iNThread);
    SetDefaultPriority(KDefaultExitPriority);
    NKern::UnlockSystem();
    }

void DThread::SvKill()
    {
    //
    // This function runs in the kernel server context
    // Enter and leave with system unlocked
    //
    __KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O SvKill()",this));

    // move any queued user-mode callbacks to this thread and cancel them
    NKern::MoveUserModeCallbacks(NCurrentThread(), &iNThread);
    NKern::CancelUserModeCallbacks();

    DProcess* pP=iOwningProcess;

    // take this thread off the process thread list
    Kern::MutexWait(*pP->iProcessLock);
    if (iProcessLink.iNext)
        {
        iProcessLink.Deque();
        iProcessLink.iNext=NULL;
        }
    TBool all_threads_gone=pP->iThreadQ.IsEmpty();
    Kern::MutexSignal(*pP->iProcessLock);

    // if all threads have exited, complete process logons
    if (all_threads_gone)
        {
        NKern::LockSystem();
        if (pP->iExitType == EExitPending)
            {
            // process has exited by last thread exiting, take its exit reason
            pP->iExitType = (TUint8)iExitType;
            pP->iExitReason = iExitReason;
            pP->iExitCategory = iExitCategory;
            }
        NKern::UnlockSystem();
        __KTRACE_OPT(KPROC,Kern::Printf("Completing process logons for %O",pP));
        TLogon::CompleteAll(pP->iTargetLogons, TLogon::ETarget, pP->iExitReason);
        }

    // notify undertakers + change notifiers
    Kern::NotifyChanges(EChangesThreadDeath);
    Kern::NotifyThreadDeath(this);

    // close handles, free stacks, close thread
    Release();

    // if all threads in process now dead, clean up process
    if (all_threads_gone)
        pP->Release();
    }

void DThread::AbortTimer(TBool aAbortAbsolute)
    {
    TInt typeMask=aAbortAbsolute?(TTimer::ELocked|TTimer::EAbsolute):(TTimer::ELocked);
    iTimer.Abort(this,typeMask);
    }


void DThread::SetPaging(TUint& aCreateFlags)
    {// Default implementation that doesn't allow threads to be paged.
    // It is virtual so can be overridden on memory models that allow data paging.
    __ASSERT_COMPILE(EThreadCreateFlagPagingUnspec == 0);
    aCreateFlags &= ~EThreadCreateFlagPagingMask;
    }


TInt DThread::Create(SThreadCreateInfo& aInfo)
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("DThread::Create %lS owner %O size %03x", &aInfo.iName,
                                        iOwningProcess, aInfo.iTotalSize));

    if (aInfo.iTotalSize < (TInt)sizeof(SThreadCreateInfo))
        return KErrArgument;
    if (aInfo.iTotalSize > KMaxThreadCreateInfo)
        return KErrArgument;
    SetOwner(iOwningProcess);
    __KTRACE_OPT(KTHREAD,Kern::Printf("Owner set"));
    TInt r=KErrNone;
    if (aInfo.iName.Length()!=0)
        {
        SetProtection(DObject::EGlobal);
        r=SetName(&aInfo.iName);
        __KTRACE_OPT(KTHREAD,Kern::Printf("Name set, %d",r));
        if (r!=KErrNone)
            return r;
        }
    else
        SetProtection(DObject::EProtected);

    iId = K::NewId();
    iThreadPriority=aInfo.iInitialThreadPriority;
    iThreadType=(TUint8)aInfo.iType;

    // Determine the data paging properties for the thread and create user stack.
    if (iThreadType == EThreadUser)
        {
        // This is a user side thread so aInfo will be a SStdEpocThreadCreateInfo.
        __NK_ASSERT_DEBUG(aInfo.iTotalSize == sizeof(SStdEpocThreadCreateInfo));
        SStdEpocThreadCreateInfo& info = (SStdEpocThreadCreateInfo&)aInfo;
        SetPaging(info.iFlags); // Will update info.iFlags to have paged or unpaged set.
        r = AllocateUserStack(info.iUserStackSize, info.iFlags & EThreadCreateFlagPaged);
        if (r != KErrNone)
            return r;
        }

    // inherit system critical + process critical from process (implements AllThreadsCritical)
    if (iThreadType == EThreadInitial || iThreadType == EThreadAPInitial)
        iFlags |= KThreadFlagSystemPermanent; // initial thread can't exit for any reason
    else if (iThreadType == EThreadUser)
        iFlags |= (iOwningProcess->iFlags & KThreadFlagProcessCritical);
    else
        iFlags |= KThreadFlagSystemCritical; // kernel threads can't panic

    // create platform-dependent stuff
    r=DoCreate(aInfo);
    if (r!=KErrNone)
        return r;
    r = TThreadWaitList::ThreadCreated();
    if (r!=KErrNone)
        return r;
    iWaitListReserved = 1;
    if (iThreadType!=EThreadInitial && iThreadType!=EThreadMinimalSupervisor)
        r=K::Containers[EThread]->Add(this);
    return r;
    }

TInt DThread::SetPriority(TThreadPriority aPriority)
    {
    TInt tp=0;
    switch(aPriority)
        {
        case EPriorityMuchLess: tp=EThrdPriorityMuchLess; break;
        case EPriorityLess: tp=EThrdPriorityLess; break;
        case EPriorityNormal: tp=EThrdPriorityNormal; break;
        case EPriorityMore: tp=EThrdPriorityMore; break;
        case EPriorityMuchMore: tp=EThrdPriorityMuchMore; break;
        case EPriorityRealTime: tp=EThrdPriorityRealTime; break;
        case EPriorityAbsoluteVeryLow: tp=EThrdPriorityAbsoluteVeryLow; break;
        case EPriorityAbsoluteLowNormal: tp=EThrdPriorityAbsoluteLowNormal; break;
        case EPriorityAbsoluteLow: tp=EThrdPriorityAbsoluteLow; break;
        case EPriorityAbsoluteBackgroundNormal: tp=EThrdPriorityAbsoluteBackgroundNormal; break;
        case EPriorityAbsoluteBackground: tp=EThrdPriorityAbsoluteBackground; break;
        case EPriorityAbsoluteForegroundNormal: tp=EThrdPriorityAbsoluteForegroundNormal; break;
        case EPriorityAbsoluteForeground: tp=EThrdPriorityAbsoluteForeground; break;
        case EPriorityAbsoluteHighNormal: tp=EThrdPriorityAbsoluteHighNormal; break;
        case EPriorityAbsoluteHigh: tp=EThrdPriorityAbsoluteHigh; break;
        case EPriorityAbsoluteRealTime1: tp=EThrdPriorityAbsoluteRealTime1; break;
        case EPriorityAbsoluteRealTime2: tp=EThrdPriorityAbsoluteRealTime2; break;
        case EPriorityAbsoluteRealTime3: tp=EThrdPriorityAbsoluteRealTime3; break;
        case EPriorityAbsoluteRealTime4: tp=EThrdPriorityAbsoluteRealTime4; break;
        case EPriorityAbsoluteRealTime5: tp=EThrdPriorityAbsoluteRealTime5; break;
        case EPriorityAbsoluteRealTime6: tp=EThrdPriorityAbsoluteRealTime6; break;
        case EPriorityAbsoluteRealTime7: tp=EThrdPriorityAbsoluteRealTime7; break;
        case EPriorityAbsoluteRealTime8: tp=EThrdPriorityAbsoluteRealTime8; break;
        default:
            K::PanicCurrentThread(EBadPriority);
        }
    SetThreadPriority(tp);
    return KErrNone;
    }

#ifndef __REQUEST_COMPLETE_MACHINE_CODED__

void DThread::RequestComplete(TRequestStatus*& aStatus, TInt aReason)
//
// Signal this thread's request semaphore.
// Enter with system locked, return with system unlocked.
//
    {

    __KTRACE_OPT(KDATAPAGEWARN,Kern::Printf("Data paging: Use of deprecated DThread::RequestComplete API by %O", TheCurrentThread));
    TRequestStatus *theStatus=aStatus;
    aStatus=NULL; // to indicate that this request has been completed
    __KTRACE_OPT2(KTHREAD,KSEMAPHORE,Kern::Printf("DThread::RequestComplete %O %d->%08x",this,aReason,theStatus));

#ifndef __MEMMODEL_FLEXIBLE__

    TInt r = KErrDied;
    if (iMState!=EDead)
        {
        TIpcExcTrap xt;
        xt.iLocalBase=0;
        xt.iRemoteBase=(TLinAddr)theStatus;
        xt.iSize=sizeof(TInt);
        xt.iDir=1;
        r=xt.Trap(this);
        if (r==KErrNone)
            {
            // On some memory models (such as moving), RawRead may update the content of xt. It
            // happens if the home address is accessed (instead of the provided run address) or
            // if it reads/writes in chunks.
            r=RawWrite(theStatus,&aReason,sizeof(TInt),0,this,&xt);
            xt.UnTrap();
            }
        }

    if (r == KErrNone)
        {
#ifdef BTRACE_REQUESTS
        BTraceContext12(BTrace::ERequests,BTrace::ERequestComplete,&this->iNThread,theStatus,aReason);
#endif
        NKern::ThreadRequestSignal(&iNThread, SYSTEM_LOCK);
        }
    else
        NKern::UnlockSystem();

#else

    NKern::UnlockSystem();
    if (iMState!=EDead && Kern::ThreadRawWrite(this,theStatus,&aReason,sizeof(TInt))==KErrNone)
        {
#ifdef BTRACE_REQUESTS
        BTraceContext12(BTrace::ERequests,BTrace::ERequestComplete,&this->iNThread,theStatus,aReason);
#endif
        NKern::ThreadRequestSignal(&iNThread);
        }

#endif
    }

#endif

#if !defined(__REQUEST_COMPLETE_MACHINE_CODED__) || defined(__MEMMODEL_FLEXIBLE__)

/** Write back a completion code and signal a thread's request semaphore, indicating that an asynchronous request has completed.

@param aThread Thread to be signaled
@param aStatus A TRequestStatus instance that will receive the request status code.
               It must reside in the user side address space.
@param aReason Request status code. KErrCancel indicates the request has been canceled.

@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked
@pre interrupts enabled

@deprecated
*/
EXPORT_C void Kern::RequestComplete(DThread* aThread, TRequestStatus*& aStatus, TInt aReason)
    {
    CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::RequestComplete");
    __KTRACE_OPT(KDATAPAGEWARN,Kern::Printf("Data paging: Use of deprecated Kern::RequestComplete API by %O", TheCurrentThread));
    NKern::LockSystem();
    if (aStatus)
        aThread->RequestComplete(aStatus,aReason);
    else
        NKern::UnlockSystem();
    }

#endif

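// Illustrative use of the deprecated overload above, as it might appear in a
// device driver completing a client's asynchronous request (a sketch: iClient
// and iStatus are hypothetical members holding the requesting DThread and its
// TRequestStatus pointer, not names from this file):
//
//     void DExampleChannel::Complete(TInt aResult)
//         {
//         // aStatus is passed by reference and zeroed once completed
//         Kern::RequestComplete(iClient, iStatus, aResult);
//         }
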
#ifndef __REQUEST_COMPLETE_MACHINE_CODED__

/** Complete a request made by the current thread.

This writes the completion code and signals the current thread's request semaphore, indicating
that an asynchronous request has completed.

Note that this must only be called in the context of the thread that made the request.

@param aStatus A TRequestStatus instance that will receive the request status code.
               It must reside in the user side address space of the current thread.
@param aReason Request status code. KErrCancel indicates the request has been canceled.

@pre Call in a thread context.
@pre Kernel must be unlocked
@pre interrupts enabled

@publishedPartner
@released
*/
EXPORT_C void Kern::RequestComplete(TRequestStatus*& aStatus, TInt aReason)
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED | MASK_INTERRUPTS_ENABLED | MASK_NOT_ISR | MASK_NOT_IDFC,"Kern::RequestComplete");
    TRequestStatus* status = (TRequestStatus*)__e32_atomic_swp_rel_ptr(&aStatus, 0);
    if (status && KUSafeWrite(status, &aReason, sizeof(aReason)) == NULL)
        NKern::ThreadRequestSignal(NKern::CurrentThread());
    }

#endif

/********************************************
 * User heap
 ********************************************/

class RUserAllocator : public RAllocator
    {
public:
    TBool Close();
    TInt* GetHandleList(TInt& aCount);
    };

// Decrement the object's reference count and return true if it reached zero
TBool RUserAllocator::Close()
    {
    return Kern::KUSafeDec(iAccessCount) == 1;
    }

TInt* RUserAllocator::GetHandleList(TInt& aCount)
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("RUserAllocator::GetHandleList() %08x", this));
    TInt* h[2] = {NULL, NULL};
    if (Close())
        kumemget32(h, &iHandleCount, sizeof(h));
    aCount = (TInt)h[0];
    return h[1];
    }

#ifdef __USERSIDE_THREAD_DATA__
void TLocalThreadData::Close()
    {
    RUserAllocator* tlsHeap;
    kumemget32(&tlsHeap, &iTlsHeap, sizeof(tlsHeap));
    if (tlsHeap)
        tlsHeap->Close();
    }
#endif

void DThread::CloseCreatedHeap()
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("DThread::CloseCreatedHeap() %O",this));
    TInt r;
#ifdef __USERSIDE_THREAD_DATA__
    if (iFlags & KThreadFlagLocalThreadDataValid)
        {
        TLocalThreadData* threadData = (TLocalThreadData*)(iUserStackRunAddress + iUserStackSize - KLocalThreadDataSize);
        XTRAP(r, XT_DEFAULT, threadData->Close());
        __KTRACE_OPT(KTHREAD, Kern::Printf("TLocalThreadData::Close() r=%d", r));
        if (r != KErrNone)
            return;
        }
#endif
    RUserAllocator* pA = (RUserAllocator*)__e32_atomic_swp_ord_ptr(&iCreatedAllocator, 0);
    if (!pA)
        return;
    TInt c = 0;
    TInt* h = NULL;
    XTRAP(r, XT_DEFAULT, h = pA->GetHandleList(c));
    __KTRACE_OPT(KTHREAD,Kern::Printf("RUserAllocator::SvClose() r=%d c=%d",r,c));
    if (r!=KErrNone || (TUint)c>(TUint)RAllocator::EMaxHandles)
        return;

    TInt handle[RAllocator::EMaxHandles];
    XTRAP(r, XT_DEFAULT, kumemget32(handle, h, c*sizeof(TInt)));
    __KTRACE_OPT(KTHREAD,Kern::Printf("RUserAllocator::SvClose() r=%d h=%08x",r,h));
    if (r!=KErrNone)
        return;
    h = handle;
    TInt* e = h + c;
    for (; h<e; ++h)
        {
        if (*h)
            HandleClose(*h);
        }
    }

TInt DThread::SetTls(TInt aHandle, TInt aDllUid, TAny* aPtr)
//
// Set the Thread Local Storage variable for a DLL.
//
    {
    STls tls;
    tls.iHandle = aHandle;
    tls.iDllUid = aDllUid;
    tls.iPtr = aPtr;
    TInt i;
    TInt r=iTls.FindInSignedKeyOrder(tls,i);
    if (r==KErrNone)
        {
        iTls[i] = tls;
        return KErrNone;
        }
    return iTls.Insert(tls,i);
    }

TAny* DThread::Tls(TInt aHandle, TInt aDllUid)
//
// Retrieve the Thread Local Storage variable for a DLL.
//
    {
    STls tls;
    tls.iHandle=aHandle;
    TInt r=iTls.FindInSignedKeyOrder(tls);
    if (r>=0 && iTls[r].iDllUid==aDllUid)
        return iTls[r].iPtr;
    return NULL;
    }

void DThread::FreeTls(TInt aHandle)
//
// Remove the Thread Local Storage variable for a DLL.
//
    {
    STls tls;
    tls.iHandle=aHandle;
    TInt r=iTls.FindInSignedKeyOrder(tls);
    if (r>=0)
        {
        iTls.Remove(r);
        iTls.Compress();
        }
    }

TInt DThread::Rename(const TDesC& aName)
    {
    TKName n;
    Name(n); // get current name
    if (n.MatchF(aName)==0)
        return KErrNone; // new name is the same so nothing to do
    K::Containers[EThread]->Wait();
    TInt r=K::Containers[EThread]->CheckUniqueFullName(this,aName);
    if (r==KErrNone)
        {
        __KTRACE_OPT(KTHREAD,Kern::Printf("DThread::Rename %O to %lS",this,&aName));
        r=SetName(&aName);
#ifdef BTRACE_THREAD_IDENTIFICATION
        Name(n);
        BTraceN(BTrace::EThreadIdentification,BTrace::EThreadName,&iNThread,iOwningProcess,n.Ptr(),n.Size());
#endif
        }
    K::Containers[EThread]->Signal();
    __COND_DEBUG_EVENT(r==KErrNone, EEventUpdateThread, this);
    return(r);
    }


/**
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre System must be locked
@pre Call in a thread context.
*/
TInt DThread::GetDesInfo(const TAny* aDes, TInt& aLength, TInt& aMaxLength, TUint8*& aPtr, TBool aWriteable)
//
// Get remote descriptor info.
// Enter and leave with system locked
//
    {
#ifndef __MEMMODEL_FLEXIBLE__
    CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED|MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
        "DThread::GetDesInfo");
#else
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
        "DThread::GetDesInfo");
#endif
    TDesHeader d;
    TInt r=ReadAndParseDesHeader(aDes,d);
    if (r!=KErrNone)
        return r;
    aLength = d.Length();
    aPtr = (TUint8*)d.DataPtr();
    if (d.IsWriteable())
        aMaxLength = d.MaxLength();
    else
        {
        if (aWriteable)
            return KErrBadDescriptor;
        aMaxLength = 0;
        }
    return KErrNone;
    }

/**
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre System must be locked
@pre Call in a thread context.
*/
TInt DThread::GetDesLength(const TAny* aPtr)
//
// Get the length of a descriptor.
// Enter and leave with system locked
//
    {
#ifndef __MEMMODEL_FLEXIBLE__
    CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED|MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
        "DThread::GetDesLength");
#else
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
        "DThread::GetDesLength");
#endif
    TDesHeader d;
    TInt r=ReadAndParseDesHeader(aPtr,d);
    if (r<0)
        return r;
    return d.Length();
    }

/**
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre System must be locked
@pre Call in a thread context.
*/
TInt DThread::GetDesMaxLength(const TAny* aPtr)
//
// Get the maximum length of a descriptor.
// Enter and leave with system locked
//
    {
#ifndef __MEMMODEL_FLEXIBLE__
    CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED|MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
        "DThread::GetDesMaxLength");
#else
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
        "DThread::GetDesMaxLength");
#endif
    TDesHeader d;
    TInt r=ReadAndParseDesHeader(aPtr,d);
    if (r!=KErrNone)
        return r;
    return d.MaxLength();
    }

/**
Reads from this thread's address space.
Enter and leave with system locked

@param aPtr Points to the source descriptor. It must reside in this thread's address space.
@param aDest Points to the destination buffer, which is in the current process's address space.
@param aMax Specifies the maximum number of characters to read.
@param anOffset The offset in the source descriptor to copy from.
@param aMode If ORed with KCheckLocalAddress, aDest memory in the current process will be accessed with user attributes.
@return A non-negative value indicates the number of characters transferred.
        KErrDied if this thread has died.
        KErrBadDescriptor if the attempt to read the aPtr descriptor caused an exception.

@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre System must be locked
@pre Call in a thread context.
*/
TInt DThread::DesRead(const TAny* aPtr, TUint8* aDest, TInt aMax, TInt anOffset, TInt aMode)
    {
#ifndef __MEMMODEL_FLEXIBLE__
    CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED|MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
        "DThread::DesRead");
#else
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
        "DThread::DesRead");
#endif
    TDesHeader d;
    TInt r=ReadAndParseDesHeader(aPtr,d);
    if (r!=KErrNone)
        return r;
    return DoDesRead(d, aDest, aMax, anOffset, aMode);
    }

// Read descriptor from thread's address space, given the descriptor header
TInt DThread::DoDesRead(const TDesHeader& aDesInfo, TUint8* aDest, TInt aMax, TInt anOffset, TInt aMode)
    {
    if (anOffset<0 || aMax<0)
        return KErrArgument;
    TInt len=aDesInfo.Length();
    len-=anOffset;
    if (len<0)
        len=0;
    if (len>aMax)
        len=aMax;
    TInt l=len;
    if (aMode & KChunkShiftBy1)
        {
        l<<=1;
        anOffset<<=1;
        }
    const TUint8* pS = (TUint8*)aDesInfo.DataPtr() + anOffset;
#ifndef __MEMMODEL_FLEXIBLE__
    NKern::FlashSystem();
#endif
    TIpcExcTrap xt;
    xt.iLocalBase=(TLinAddr)aDest;
    xt.iRemoteBase=(TLinAddr)pS;
    xt.iSize=l;
    xt.iDir=0;
    TInt r=xt.Trap(this);
    if (r==0)
        {
        // On some memory models (such as moving), RawRead may update the content of xt. It happens
        // if the home address is accessed (instead of the provided run address) or if it reads in chunks.
        r=RawRead(pS,aDest,l,aMode&KCheckLocalAddress, &xt);
        xt.UnTrap();
        }
    return (r<0)?r:len;
    }

/**
|
|
1391 |
Write to the thread's address space.
|
|
1392 |
Enter and leave with system locked
|
|
1393 |
|
|
1394 |
@param aPtr points to descriptor to write to. It is in remote (this thread's) address space.
|
|
1395 |
@param aSrc points to source buffer, which is in the current process's address space
|
|
1396 |
@param aLength is number of characters to copy
|
|
1397 |
@param anOffset The offset in aPtr descriptor where to start writing.
|
|
1398 |
@param aOrigThread The thread on behalf of which this operation is performed (eg client of device driver). If NULL, current thread is assumed.
|
|
1399 |
@return KErrDied if this thread has died.
|
|
1400 |
KErrBadDescriptor if the attempt to read aPtr descriptor caused the exception.
|
|
1401 |
KErrNone otherwise
|
|
1402 |
|
|
1403 |
@pre Interrupts must be enabled.
|
|
1404 |
@pre Kernel must be unlocked.
|
|
1405 |
@pre System must be locked
|
|
1406 |
@pre Call in a thread context.
|
|
1407 |
*/
|
|
1408 |
TInt DThread::DesWrite(const TAny* aPtr, const TUint8* aSrc, TInt aLength, TInt anOffset, TInt aMode, DThread* anOriginatingThread)
|
|
1409 |
{
|
|
1410 |
#ifndef __MEMMODEL_FLEXIBLE__
|
|
1411 |
CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED|MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
|
|
1412 |
"DThread::DesWrite");
|
|
1413 |
#else
|
|
1414 |
CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,
|
|
1415 |
"DThread::DesWrite");
|
|
1416 |
#endif
|
|
1417 |
TDesHeader d;
|
|
1418 |
TInt r=ReadAndParseDesHeader(aPtr, d);
|
|
1419 |
if (r!=KErrNone)
|
|
1420 |
return r;
|
|
1421 |
r = DoDesWrite(aPtr, d, aSrc, aLength, anOffset, aMode, anOriginatingThread);
|
|
1422 |
return r < KErrNone ? r : KErrNone;
|
|
1423 |
}


// Write a descriptor to the thread's address space, given the descriptor header.
// Return the new flags + length word, or one of the system-wide error codes.
TInt DThread::DoDesWrite(const TAny* aPtr, TDesHeader& aDesInfo, const TUint8* aSrc, TInt aLength, TInt anOffset, TInt aMode, DThread* anOriginatingThread)
	{
	if (anOffset<0 || aLength<0)
		return KErrArgument;
	if (!aDesInfo.IsWriteable())
		return KErrBadDescriptor;
	TUint maxLen=aDesInfo.MaxLength();
	TUint8* pT=(TUint8*)aDesInfo.DataPtr();	// remote descriptor Ptr()
	TUint finalLen=aLength+anOffset;
	if (finalLen>maxLen)
		{
		if (aMode & KTruncateToMaxLength)
			{
			aLength=maxLen-anOffset;
			finalLen=maxLen;
			if (aLength<0)
				return KErrOverflow;
			}
		else
			return KErrOverflow;
		}
	TInt type=aDesInfo.Type();
	TUint typelen=TUint(type)<<KShiftDesType | TUint(finalLen);
	TInt shift=(aMode&KChunkShiftBy1)?1:0;
	anOffset<<=shift;
	aLength<<=shift;
#ifndef __MEMMODEL_FLEXIBLE__
	NKern::FlashSystem();
#endif
	TIpcExcTrap xt;
	xt.iDir=1;
	TInt r=xt.Trap(this);
	if (r==0)
		{
		xt.iLocalBase=(TLinAddr)aSrc;
		xt.iRemoteBase=(TLinAddr)(pT+anOffset);
		xt.iSize=aLength;
		if (aLength)
			{
			// On some memory models (such as moving), RawWrite may update the content of xt. It happens if the home
			// address is accessed (instead of the provided run address) or if it writes in chunks.
			r=RawWrite(pT+anOffset,aSrc,aLength,aMode&KCheckLocalAddress,anOriginatingThread, &xt);
			}
		if ((aMode & KDoNotUpdateDesLength) == 0)
			{
			if (r == KErrNone && type == EBufCPtr)
				{
				TUint8* pL = pT - sizeof(TDesC);
				xt.iLocalBase=0;
				xt.iRemoteBase=(TLinAddr)pL;
				xt.iSize=sizeof(finalLen);
				r=RawWrite(pL,&finalLen,sizeof(finalLen),0,anOriginatingThread, &xt);
				}
			xt.iLocalBase=0;
			xt.iRemoteBase=(TLinAddr)aPtr;
			xt.iSize=sizeof(finalLen);
			if (r==KErrNone)
				r=RawWrite(aPtr,&typelen,sizeof(typelen),0,anOriginatingThread, &xt);
			}
		xt.UnTrap();
		}
	return r == KErrNone ? typelen : r;
	}

TBool DThread::IsExceptionHandled(TExcType aType)
	{
	if (iExceptionHandler==NULL)
		return EFalse;
	if (iFlags&KThreadFlagLastChance)
		return EFalse;
	switch (aType)
		{
		case EExcGeneral:
		case EExcUserInterrupt:
			return iExceptionMask & KExceptionUserInterrupt;
		case EExcIntegerDivideByZero:
		case EExcIntegerOverflow:
			return iExceptionMask & KExceptionInteger;
		case EExcSingleStep:
		case EExcBreakPoint:
			return iExceptionMask & KExceptionDebug;
		case EExcBoundsCheck:
		case EExcInvalidOpCode:
		case EExcDoubleFault:
		case EExcStackFault:
		case EExcAccessViolation:
		case EExcPrivInstruction:
		case EExcAlignment:
		case EExcPageFault:
			return iExceptionMask & KExceptionFault;
		case EExcFloatDenormal:
		case EExcFloatDivideByZero:
		case EExcFloatInexactResult:
		case EExcFloatInvalidOperation:
		case EExcFloatOverflow:
		case EExcFloatStackCheck:
		case EExcFloatUnderflow:
			return iExceptionMask & KExceptionFpe;
		case EExcAbort:
			return iExceptionMask & KExceptionAbort;
		case EExcKill:
			return iExceptionMask & KExceptionKill;
		default:
			return EFalse;
		}
	}
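// Illustrative sketch (not part of the original file): iExceptionHandler and
// iExceptionMask are normally set from the user side via User::SetExceptionHandler().
// Assuming a user-side thread that wants to trap hardware faults and integer errors,
// the registration might look like:
//
//		void MyExcHandler(TExcType aType)
//			{
//			// inspect aType, clean up, then return to retry or let the thread die
//			}
//
//		TInt r = User::SetExceptionHandler(MyExcHandler, KExceptionFault | KExceptionInteger);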

/**
Terminates the current thread.

@param aReason The reason to be set in the current thread's exit information.

@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre Calling thread must not be in a critical section.
@pre Can be used in a device driver.

@post It doesn't return.
*/
EXPORT_C void Kern::Exit(TInt aReason)
	{
//	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD|MASK_NO_CRITICAL,"Kern::Exit");
#ifdef _DEBUG
#if (defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)||defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
	if (TheCurrentThread->iThreadType==EThreadUser)
		{
		CHECK_PRECONDITIONS(MASK_THREAD_STANDARD|MASK_NO_CRITICAL,"Kern::Exit");
		}
	else
		{
		CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::Exit");
		}
#endif
#endif

	NKern::LockSystem();
	TheCurrentThread->Die(EExitKill,aReason,KNullDesC);
	}
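// Illustrative sketch (not part of the original file): a kernel-side thread, for
// example one created with Kern::ThreadCreate(), can use Kern::Exit() to terminate
// itself cleanly once its work is done. Assuming 'DoBackgroundWork' is such a
// thread's entry point (a hypothetical name):
//
//		TInt DoBackgroundWork(TAny* /*aParams*/)
//			{
//			// ... perform the work ...
//			Kern::Exit(KErrNone);	// never returns; thread exits with EExitKill/KErrNone
//			return KErrNone;		// not reached
//			}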

#ifdef KTHREAD
TInt DThread::RaiseException(TExcType aType)
#else
TInt DThread::RaiseException(TExcType)
#endif
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O RaiseException(%d)",this,aType));
	if (this==TheCurrentThread)
		return KErrGeneral;
	else if (iThreadType!=EThreadUser)
		return KErrAccessDenied;
	return KErrNotSupported;
	}

void DThread::SetExitInfo(TExitType aType, TInt aReason, const TDesC& aCategory)
//
// Set a thread's exit info.
// Called with system locked.
//
	{
	if (iExitType==EExitPending)
		{
		if (iOwningProcess->iExitType!=EExitPending)
			{
			// process has already exited, so take its exit info.
			iExitType=iOwningProcess->iExitType;
			iExitReason=iOwningProcess->iExitReason;
			iExitCategory=iOwningProcess->iExitCategory;
			return;
			}
		iExitType=(TUint8)aType;
		iExitReason=aReason;
		if (iExitType==EExitKill)
			iExitCategory=KLitKill;
		else if (iExitType==EExitTerminate)
			iExitCategory=KLitTerminate;
		else if (aType==EExitPanic)
			iExitCategory=aCategory;
		}
	}

void DThread::CleanupLeave(TInt aDepth)
//
// Enter and return with system unlocked and calling thread in critical section.
//
	{
	__ASSERT_CRITICAL;
	__NK_ASSERT_DEBUG(iLeaveDepth);

	DProcess* pP = iOwningProcess;

	iLeaveDepth -= aDepth;

	if ((!iLeaveDepth) && (__e32_atomic_add_ord32(&pP->iThreadsLeaving, TUint32(-1)) == 1) && (!pP->iGarbageList.IsEmpty()))
		{
		DCodeSeg::Wait();

		// Avoid a race condition where code segs are put onto the garbage list by another thread leaving,
		// between the decrement of iThreadsLeaving above and acquiring the code seg mutex
		if (!pP->iThreadsLeaving)
			{
			while (!pP->iGarbageList.IsEmpty())
				{
				DLibrary* pL = _LOFF(pP->iGarbageList.First()->Deque(), DLibrary, iGbgLink);
				__NK_ASSERT_DEBUG(pL->iAccessCount);
				pL->iGbgLink.iNext = NULL;

				pL->ReallyRemoveFromProcess();
				pL->DObject::Close(NULL);
				}
			}

		DCodeSeg::Signal();
		}
	}

/** Function only temporarily supported to aid migration to process emulation...
*/
TInt ExecHandler::ThreadAsProcess(DThread* aThread, TInt aLibrary)
	{
	__KTRACE_OPT(KEXEC,Kern::Printf("ExecHandler::ThreadAsProcess %O",aThread));
	if (aThread->iOwningProcess->iSecurityZone!=TheCurrentThread->iOwningProcess->iSecurityZone)
		K::ProcessIsolationFailure(__PLATSEC_DIAGNOSTIC_STRING("Use of RThread::Create(const TDesC& aName,TThreadFunction aFunction,TInt aStackSize,TAny* aPtr,RLibrary* aLibrary,RHeap* aHeap, TInt aHeapMinSize,TInt aHeapMaxSize,TOwnerType aType)"));
	DObject* pL = K::ObjectFromHandle(aLibrary,ELibrary);
	pL->CheckedOpen();
	TInt r = aThread->Open();
	NKern::ThreadEnterCS();
	NKern::UnlockSystem();
	if(r==KErrNone)
		{
		TInt h;
		r = aThread->MakeHandleAndOpen(EOwnerThread,pL,h);
		aThread->Close(NULL);
		}
	pL->Close(NULL);
	NKern::ThreadLeaveCS();
	return r;
	}

/**
Sets the realtime state for the current thread.

@param aNewState The new realtime state for the thread.

@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre Interrupts must be enabled.
@pre Can be used in a device driver.
*/
EXPORT_C void Kern::SetRealtimeState(TThreadRealtimeState aNewState)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Kern::SetRealtimeState");
	TheCurrentThread->SetRealtimeState(aNewState);
	}
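// Illustrative sketch (not part of the original file): a driver servicing
// latency-sensitive requests might mark its worker thread as realtime so that
// accidental use of non-deterministic services is caught (see
// K::CheckThreadNotRealtime() below). For example, during channel setup:
//
//		Kern::SetRealtimeState(ERealtimeStateOn);	// panic on illegal functions
//		// ... or, to only emit KREALTIME warning traces while testing:
//		Kern::SetRealtimeState(ERealtimeStateWarn);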

void DThread::SetRealtimeState(TThreadRealtimeState aState)
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O SetRealtimeState %d", this, aState));
	const TUint32 clear = KThreadFlagRealtime | KThreadFlagRealtimeTest;
	TUint32 set = 0;
	switch (aState)
		{
		case ERealtimeStateOff:  set = 0; break;
		case ERealtimeStateOn:   set = KThreadFlagRealtime; break;
		case ERealtimeStateWarn: set = KThreadFlagRealtime|KThreadFlagRealtimeTest; break;

		default:
			K::PanicCurrentThread(EInvalidRealtimeState);
		}
	NKern::LockSystem();
	iFlags=(iFlags & ~clear) | set;
	NKern::UnlockSystem();
	}

TBool DThread::IsRealtime()
	{
	if(iFlags&KThreadFlagRealtime)
		return ETrue;
	return EFalse;
	}

void K::CheckThreadNotRealtime(const char* aTraceMessage)
	{
	if(TheCurrentThread->IsRealtime())
		IllegalFunctionForRealtimeThread(NULL,aTraceMessage);
	}

/**
Panic the specified thread with EIllegalFunctionForRealtimeThread.
If the thread is the current one, and it is in a critical section, then the panic
will be deferred until the critical section has been left.
@return True if thread was/will be panicked. False if function should be made 'legal' again.
@pre No fast mutexes held
*/
TBool K::IllegalFunctionForRealtimeThread(DThread* aThread,const char* aTraceMessage)
	{
	DThread* pC=TheCurrentThread;
	if(!aThread)
		aThread = pC;

	// If current thread has RealtimeTest just emit a warning trace...
	if(pC->iFlags&KThreadFlagRealtimeTest)
		{
		__KTRACE_OPT(KREALTIME,Kern::Printf("*Realtime* WARNING - Illegal function for Thread %O - %s",aThread,aTraceMessage));
#ifndef _DEBUG
		(void)aTraceMessage;	// stop unreferenced formal parameter warning
#endif
		return EFalse;
		}

	// Kill the thread...
	__KTRACE_OPT2(KREALTIME,KPANIC,Kern::Printf("*Realtime* ERROR - Illegal function for Thread %O - %s",aThread,aTraceMessage));
	NKern::LockSystem();
	if(aThread!=pC || aThread->iNThread.iCsCount==0)
		{
		aThread->Die(EExitPanic,EIllegalFunctionForRealtimeThread,KLitKernExec());
		}
	else
		{
		// Want to kill current thread but it is in a critical section, so poke the thread
		// with state so it panics when it leaves the critical section
		aThread->SetExitInfo(EExitPanic,EIllegalFunctionForRealtimeThread,KLitKernExec());
		NKern::DeferredExit();
		NKern::UnlockSystem();
		}
	return ETrue;
	}

void DThread::BTracePrime(TInt aCategory)
	{
#ifdef BTRACE_THREAD_IDENTIFICATION
	if(aCategory==BTrace::EThreadIdentification || aCategory==-1)
		{
		DProcess* process = iOwningProcess;
		TKName nameBuf;
		process->Name(nameBuf);
		BTraceN(BTrace::EThreadIdentification,BTrace::EProcessName,&iNThread,process,nameBuf.Ptr(),nameBuf.Size());
		Name(nameBuf);
		BTraceN(BTrace::EThreadIdentification,BTrace::EThreadName,&iNThread,process,nameBuf.Ptr(),nameBuf.Size());
		BTrace12(BTrace::EThreadIdentification,BTrace::EThreadId,&iNThread,process,iId);
		}
#endif
	}

/******************************************************************************
 * Miscellaneous notifications
 ******************************************************************************/
TMiscNotifierMgr::TMiscNotifierMgr()
	:	iCompletionDfc(&CompletionDfcFn, this, 1),
		iIdleNotifierQ(0),
		iIdleDfc(&IdleDfcFn, this, 1)
	{
	}

void TMiscNotifierMgr::Init2()
	{
	iCompletionDfc.SetDfcQ(K::SvMsgQ);
	iIdleDfc.SetDfcQ(K::SvMsgQ);
	iIdleNotifierQ = new SMiscNotifierQ;
	__NK_ASSERT_ALWAYS(iIdleNotifierQ);
	}

// runs in supervisor thread when completions are required
void TMiscNotifierMgr::CompletionDfcFn(TAny* aMgr)
	{
	__KTRACE_OPT(KOBJECT,Kern::Printf("TMiscNotifierMgr::CompletionDfcFn"));
	TMiscNotifierMgr& m = *(TMiscNotifierMgr*)aMgr;
	m.Lock();
	while (!m.iCompleted.IsEmpty())
		{
		TMiscNotifier* n = _LOFF(m.iCompleted.First()->Deque(), TMiscNotifier, iObjLink);
		n->iThreadLink.Deque();
		m.Unlock();
		__KTRACE_OPT(KOBJECT,Kern::Printf("Completing %08x thread %O 0->%08x", n, n->iThread, n->iStatus));
		DThread* t = n->iThread;
		Kern::QueueRequestComplete(t, n, KErrNone);
		t->Close(NULL);
		n->Close();
		m.Lock();
		}
	m.Unlock();
	}

// runs in supervisor thread when idle notification completions are required
void TMiscNotifierMgr::IdleDfcFn(TAny* aMgr)
	{
	TMiscNotifierMgr& m = *(TMiscNotifierMgr*)aMgr;
	m.CompleteNotifications(*m.iIdleNotifierQ);
	}

void TMiscNotifierMgr::CompleteNotifications(SDblQue& aQ)
	{
	__KTRACE_OPT(KOBJECT,Kern::Printf("TMiscNotifierMgr::CompleteNotifications(%08x)",&aQ));
	Lock();
	TBool queue_dfc = EFalse;
	if (!aQ.IsEmpty())
		{
		// remember if completed list was originally empty
		queue_dfc = iCompleted.IsEmpty();

		// move items on provided list onto completed list
		iCompleted.MoveFrom(&aQ);
		}
	Unlock();
	if (queue_dfc)
		iCompletionDfc.Enque();
	}


// Create and install a miscellaneous notifier.
// Return 1 if the list was originally empty.
// Return 0 on success when the list was not originally empty.
// Return <0 on error.
// If aN is non-null on entry and a TMiscNotifier is required, *aN is used instead
// of allocating memory; aN is then set to NULL. Similarly for aQ.
// If aObj is TRUE, aPtr points to a DObject to which the notifier should be attached.
// If aObj is FALSE, aPtr points to a SMiscNotifierQ pointer.
TInt TMiscNotifierMgr::NewMiscNotifier(TRequestStatus* aStatus, TBool aObj, TAny* aPtr, TMiscNotifier*& aN, SMiscNotifierQ*& aQ)
	{
	TInt result = 0;
	DThread& t = *TheCurrentThread;
	DObject* pO = aObj ? (DObject*)aPtr : 0;
	SMiscNotifierQ** pQ = aObj ? 0 : (SMiscNotifierQ**)aPtr;
	TMiscNotifier* n = aN;
	if (!n)
		n = new TMiscNotifier;
	if (!n)
		return KErrNoMemory;
	__KTRACE_OPT(KEXEC,Kern::Printf("NewMiscNotifier at %08x, status=%08x, pO=%O, pQ=%08x",n,aStatus,pO,pQ));
	TInt r = n->SetStatus(aStatus);
	__NK_ASSERT_DEBUG(r == KErrNone);	// can't fail
	(void)r;
	n->iThread = &t;
	SMiscNotifierQ* newq = 0;
	SMiscNotifierQ* q = 0;
	TBool newq_reqd = pO ? (!pO->HasNotifierQ()) : (!*pQ);
	FOREVER
		{
		if (!newq && newq_reqd)
			{
			newq = aQ;
			if (!newq)
				newq = new SMiscNotifierQ;
			if (!newq)
				{
				if (n != aN)
					n->Close();
				return KErrNoMemory;
				}
			}
		Lock();
		q = pO ? (pO->NotifierQ()) : (*pQ);
		if (!q)
			{
			if (!newq)
				{
				Unlock();
				newq_reqd = ETrue;
				continue;
				}
			q = newq;
			if (q == aQ)
				aQ = 0;
			newq = 0;
			if (pO)
				pO->SetNotifierQ(q);
			else
				*pQ = q;
			}
		if (q->IsEmpty())
			result = 1;
		q->Add(&n->iObjLink);
		t.iMiscNotifiers.Add(&n->iThreadLink);
		Unlock();
		break;
		}
	if (newq && newq!=aQ)
		Kern::Free(newq);
	if (n == aN)
		aN = 0;
	t.Open();
	return result;
	}


void ExecHandler::NotifyOnIdle(TRequestStatus* aStatus)
	{
	__KTRACE_OPT(KEXEC,Kern::Printf("ExecHandler::NotifyOnIdle %08x",aStatus));
	TMiscNotifierMgr& m = K::TheMiscNotifierMgr;
	NKern::ThreadEnterCS();
	// FIXME: TIME ORDERING PROBLEM IF IDLE DFC IS CURRENTLY PENDING
	TMiscNotifier* n = 0;
	SMiscNotifierQ* q = 0;
	TInt r = m.NewMiscNotifier(aStatus, FALSE, &K::TheMiscNotifierMgr.iIdleNotifierQ, n, q);
	if (r<0)
		Kern::RequestComplete(aStatus, r);
	else if (r>0)
		m.iIdleDfc.QueueOnIdle();
	NKern::ThreadLeaveCS();
	}


// Find all this thread's miscellaneous notifiers with request status equal to aStatus.
// Dequeue them from both the thread and the object they monitor and place them in
// an output doubly linked list.
// If aStatus==NULL, extract all this thread's miscellaneous notifiers.
void DThread::ExtractMiscNotifiers(SDblQue& aQ, TRequestStatus* aStatus)
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("%T ExtractMiscNotifiers(%08x)", this, aStatus));
	TMiscNotifierMgr& m = K::TheMiscNotifierMgr;
	SDblQueLink* anchor = &iMiscNotifiers.iA;
	m.Lock();
	SDblQueLink* p = anchor->iNext;
	for (; p!=anchor; p=p->iNext)
		{
		TMiscNotifier* n = _LOFF(p, TMiscNotifier, iThreadLink);
		if (aStatus && n->StatusPtr() != aStatus)
			continue;
		__KTRACE_OPT(KTHREAD,Kern::Printf("Found %08x", n));
		p = p->iPrev;	// step back so the loop increment lands on the entry after the one being dequeued
		n->iThreadLink.Deque();
		n->iObjLink.Deque();

		// no-one else can see this notifier now
		aQ.Add(&n->iThreadLink);
		}
	m.Unlock();
	}


void ExecHandler::CancelMiscNotifier(TRequestStatus* aStatus)
	{
	__KTRACE_OPT(KEXEC,Kern::Printf("ExecHandler::CancelMiscNotifier %08x",aStatus));
	DThread& t = *TheCurrentThread;
	SDblQue garbage;
	NKern::ThreadEnterCS();
	t.ExtractMiscNotifiers(garbage, aStatus);
	while (!garbage.IsEmpty())
		{
		TMiscNotifier* n = _LOFF(garbage.First()->Deque(), TMiscNotifier, iThreadLink);
		__KTRACE_OPT(KOBJECT,Kern::Printf("Completing %08x thread %O -3->%08x", n, n->iThread, n->iStatus));
		TRequestStatus* s = n->StatusPtr();
		n->Close();	// delete before complete in case someone's testing for kernel heap leaks
		Kern::RequestComplete(s, KErrCancel);
		t.Close(NULL);
		}
	NKern::ThreadLeaveCS();
	}

void ExecHandler::NotifyObjectDestruction(TInt aHandle, TRequestStatus* aStatus)
	{
	TMiscNotifierMgr& m = K::TheMiscNotifierMgr;
	DObject* pO=NULL;
	TInt r=K::OpenObjectFromHandle(aHandle,pO);
	if (r!=KErrNone)
		K::PanicKernExec(EBadHandle);
	__KTRACE_OPT(KEXEC,Kern::Printf("ExecHandler::NotifyObjectDestruction %O %08x",pO,aStatus));
	TMiscNotifier* n = 0;
	SMiscNotifierQ* q = 0;
	r = m.NewMiscNotifier(aStatus, TRUE, pO, n, q);
	pO->Close(NULL);
	if (r<0)
		Kern::RequestComplete(aStatus, r);
	NKern::ThreadLeaveCS();
	}

void DThread::KillMiscNotifiers()
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("%T KillMiscNotifiers", this));
	SDblQue garbage;
	ExtractMiscNotifiers(garbage, 0);
	while (!garbage.IsEmpty())
		{
		TMiscNotifier* n = _LOFF(garbage.First()->Deque(), TMiscNotifier, iThreadLink);
		// don't bother completing since thread has terminated anyway
		__KTRACE_OPT(KOBJECT,Kern::Printf("Killing %08x", n));
		DThread* t = n->iThread;
		n->Close();
		t->Close(NULL);
		}
	}

void DThread::Rendezvous(TInt aReason)
//
// Enter and return with system unlocked and calling thread in critical section.
//
	{
	TLogon::CompleteAll(iTargetLogons, TLogon::ETargetRendezvous, aReason);
	}

TInt DThread::Logon(TRequestStatus* aStatus, TBool aRendezvous)
	{
	TInt r = KErrNoMemory;
	DThread* pC = TheCurrentThread;
	__KTRACE_OPT(KTHREAD, Kern::Printf("Thread %O Logon to thread %O, status at %08x rdv=%x",
										pC, this, aStatus, aRendezvous));

	TLogon* pL = new TLogon;
	if (pL)
		{
		TUint32 type = TLogon::ETargetThread;
		if (aRendezvous)
			type |= TLogon::ERendezvous;
		r = pL->Attach(iTargetLogons, pC, this, aStatus, type);
		if (r != KErrNone)
			pL->Close();
		}

	__KTRACE_OPT(KTHREAD, Kern::Printf("DThread::Logon ret %d", r));
	return r;
	}
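// Illustrative sketch (not part of the original file): user-side code reaches
// DThread::Logon() through RThread::Logon(). A typical parent waiting for a child
// thread to exit, assuming 'child' is an open, not-yet-resumed RThread:
//
//		TRequestStatus status;
//		child.Logon(status);			// request exit notification
//		child.Resume();
//		User::WaitForRequest(status);	// completes with the child's exit reason
//		TExitType exitType = child.ExitType();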

#ifdef __SMP__
void DThread::SMPSafeCallback(TAny* aThisPtr, TUserModeCallbackReason aReasonCode)
	{
	if (aReasonCode == EUserModeCallbackRun)
		{
		DThread* t = _LOFF(aThisPtr,DThread,iSMPSafeCallback);
		TUint32 config = TheSuperPage().KernelConfigFlags();
		if (config & EKernelConfigSMPUnsafeCPU0)
			{
			if (t->iOwningProcess->iSMPUnsafeCount)
				NKern::ThreadSetCpuAffinity(&t->iNThread, 0);
			else
				NKern::ThreadSetCpuAffinity(&t->iNThread, KCpuAffinityAny);
			}
		else if (config & EKernelConfigSMPUnsafeCompat)
			{
			if (t->iOwningProcess->iSMPUnsafeCount)
				NKern::JoinGroup(t->iOwningProcess->iSMPUnsafeGroup);
			else
				NKern::LeaveGroup();
			}
		}
	}
#endif

/********************************************
 * TLogon
 ********************************************/

//
// Pseudo-constructor for the TLogon class. Fills in all the private members, and
// attaches the TLogon object to both the owner's and the target's linked lists.
// Enter and return with no fast mutexes held and current thread in CS.
//
TInt TLogon::Attach(SDblQue& aList, DThread* aOwner, DObject* aTarget,
					TRequestStatus* aStatus, TUint32 aType)
	{
	TInt r = KErrNone;
	Lock();
	if (aType & ETargetProcess)
		{
		DProcess* pP = (DProcess*)aTarget;
		if (pP->iAttributes & DProcess::EBeingLoaded)
			r = KErrAccessDenied;
		else if (pP->iExitType != EExitPending)
			r = KErrDied;
		}
	else
		{
		DThread* pT = (DThread*)aTarget;
		if (pT->iMState == DThread::ECreated)
			r = KErrAccessDenied;
		else if (pT->iExiting)
			r = KErrDied;
		}
	if (r == KErrNone)
		{
		iType = aType;
		r = SetStatus(aStatus);
		__NK_ASSERT_DEBUG(r == KErrNone);	// can't fail
		iOwningThread = aOwner;
		iTarget = aTarget;
		aOwner->Open();
		aTarget->Open();

		// Rendezvous entries are always kept ahead of non-rendezvous ones
		if (aType & ERendezvous)
			aList.AddHead(&iTargetLink);
		else
			aList.Add(&iTargetLink);
		aOwner->iOwnedLogons.Add(&iOwningThreadLink);
		}
	Unlock();
	return r;
	}

//
// Cancel a specific logon on the owning thread's list aList.
// The logon to be cancelled is identified by aTarget, aStatus, and aType.
// Enter and return with no fast mutexes held and current thread in CS.
//
TInt TLogon::Cancel(SDblQue& aList, DObject* aTarget, TRequestStatus* aStatus, TUint32 aType)
	{
	Lock();
	SDblQueLink* anchor = &aList.iA;
	SDblQueLink* pLink = aList.First();

	for ( ; pLink != anchor; pLink = pLink->iNext)
		{
		TLogon* pL = _LOFF(pLink, TLogon, iOwningThreadLink);
		if (pL->iType == aType && pL->iTarget == aTarget && pL->StatusPtr() == aStatus)
			{
			// found it
			pL->iOwningThreadLink.Deque();
			pL->iTargetLink.Deque();
			Unlock();

			// To avoid a race condition after dropping the lock, we must complete this
			// request BEFORE closing the referenced target object and owning thread ...
			Kern::QueueRequestComplete(pL->iOwningThread, pL, KErrNone);
			pL->iTarget->Close(NULL);
			pL->iOwningThread->Close(NULL);
			pL->Close();
			return KErrNone;
			}
		}

	// not found
	Unlock();
	return KErrGeneral;
	}

//
// Complete either all logons or only the rendezvous entries on the list aList with reason aReason.
// Enter and return with no fast mutexes held and current thread in CS.
//
void TLogon::CompleteAll(SDblQue& aList, TComplete aAction, TInt aReason)
	{
	TInt offset = (aAction == EOwned) ? _FOFF(TLogon, iOwningThreadLink) : _FOFF(TLogon, iTargetLink);

	FOREVER
		{
		Lock();
		if (aList.IsEmpty())
			break;

		SDblQueLink* pLink = aList.First();
		TLogon* pL = (TLogon*)((TUint8*)pLink-offset);

		// Rendezvous entries are always ahead of non-rendezvous ones, so stop
		// if we're only interested in rendezvous and we find a non-rendezvous one
		if (aAction == ETargetRendezvous && !pL->IsRendezvous())
			break;

		pL->iOwningThreadLink.Deque();
		pL->iTargetLink.Deque();
		Unlock();
		__KTRACE_OPT(KTHREAD, Kern::Printf("Complete Logon to %O type %d",
											pL->iOwningThread, pL->iType));

		// To avoid a race condition after dropping the lock, we must complete this
		// request BEFORE closing the referenced target object and owning thread ...
		Kern::QueueRequestComplete(pL->iOwningThread, pL, aReason);
		pL->iTarget->Close(NULL);
		pL->iOwningThread->Close(NULL);
		pL->Close();
		}

	Unlock();
	}