author       Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
date         Thu, 19 Aug 2010 11:14:22 +0300
branch       RCL_3
changeset    42:a179b74831c9
parent       22:2f92ad2dc5db
child        43:c1f20ce4abcf
permissions  -rw-r--r--
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\kernel\skernel.cpp
//
//

#include <kernel/kern_priv.h>
#include "execs.h"

#define iMState iWaitLink.iSpare1 // Allow a sensible name to be used for iMState

#ifdef BTRACE_SYMBIAN_KERNEL_SYNC
#define BTRACE_KS(sub,obj) {BTraceContext4(BTrace::ESymbianKernelSync, (sub), (obj));}
#define COND_BTRACE_KS(cond,sub,obj) if (cond) {BTraceContext4(BTrace::ESymbianKernelSync, (sub), (obj));}
#define BTRACE_KS2(sub,obj1,obj2) {BTraceContext8(BTrace::ESymbianKernelSync, (sub), (obj1), (obj2));}
#define COND_BTRACE_KS2(cond,sub,obj1,obj2) if (cond) {BTraceContext8(BTrace::ESymbianKernelSync, (sub), (obj1), (obj2));}
#define BTRACE_KSC(sub) {TKName n; Name(n); BTraceContextN(BTrace::ESymbianKernelSync, (sub), this, iOwner, n.Ptr(), n.Size());}
#define COND_BTRACE_KSC(cond,sub) if (cond) {TKName n; Name(n); BTraceContextN(BTrace::ESymbianKernelSync, (sub), this, iOwner, n.Ptr(), n.Size());}
#else
#define BTRACE_KS(sub,obj)
#define COND_BTRACE_KS(cond,sub,obj)
#define BTRACE_KS2(sub,obj1,obj2)
#define COND_BTRACE_KS2(cond,sub,obj1,obj2)
#define BTRACE_KSC(sub)
#define COND_BTRACE_KSC(cond,sub)
#endif

/********************************************
 * Semaphore
 ********************************************/
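// A DSemaphore is a counting semaphore with priority-ordered waiting.
// iCount encodes the state: when negative, -iCount threads are queued on
// iWaitQ; waiters that are suspended while queued are moved to iSuspendedQ
// and no longer count towards iCount (see SuspendWaitingThread below).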
// Enter and return with system unlocked.
DSemaphore::~DSemaphore()
    {
    NKern::LockSystem();
    Reset();
    BTRACE_KS(BTrace::ESemaphoreDestroy, this);
    NKern::UnlockSystem();
    }

// Enter and return with system unlocked.
TInt DSemaphore::Create(DObject* aOwner, const TDesC* aName, TInt aInitialCount, TBool aVisible)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("DSemaphore::Create owner %O, name %lS, init count=%d, visible=%d",aOwner,aName,aInitialCount,aVisible));
    if (aInitialCount<0)
        return KErrArgument;
    SetOwner(aOwner);
    TInt r=KErrNone;
    if (aName && aName->Length())
        {
        r=SetName(aName);
        if (r!=KErrNone)
            return r;
        }
    iCount=aInitialCount;
    r = iWaitQ.Construct();
    if (r==KErrNone && aVisible)
        r=K::AddObject(this,ESemaphore);
    COND_BTRACE_KSC(r==KErrNone, BTrace::ESemaphoreCreate);
    return r;
    }

// Wait for semaphore with timeout
// Enter with system locked, return with system unlocked.
TInt DSemaphore::Wait(TInt aNTicks)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Semaphore %O Wait %d Timeout %d",this,iCount,aNTicks));
    __ASSERT_DEBUG(TheCurrentThread->iMState==DThread::EReady,K::Fault(K::ESemWaitBadState));
    __ASSERT_DEBUG(!TheCurrentThread->iWaitObj,K::Fault(K::ESemWaitBadWaitObj));
    TInt r=KErrNone;
    if (iResetting)
        r=KErrGeneral;
    else if (--iCount<0)
        {
        DThread* pC=TheCurrentThread;
        pC->iMState=DThread::EWaitSemaphore;
        pC->iWaitObj=this;
        iWaitQ.Add(pC);
        BTRACE_KS(BTrace::ESemaphoreBlock, this);
        r=NKern::Block(aNTicks,NKern::ERelease,SYSTEM_LOCK);
        __ASSERT_DEBUG(pC->iMState==DThread::EReady,K::Fault(K::ESemWaitBadState));
        COND_BTRACE_KS(r==KErrNone, BTrace::ESemaphoreAcquire, this);
        return r;
        }
#ifdef BTRACE_SYMBIAN_KERNEL_SYNC
    else
        BTRACE_KS(BTrace::ESemaphoreAcquire, this);
#endif
    NKern::UnlockSystem();
    return r;
    }
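// Wait() outcomes: KErrNone once the semaphore is acquired, KErrGeneral if
// the semaphore is reset (or deleted) while waiting, and - assuming the usual
// NKern::Block() timeout behaviour - KErrTimedOut if aNTicks expires first.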
|
// Enter with system locked, return with system unlocked.
void DSemaphore::Signal()
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Semaphore %O Signal %d",this,iCount));
    __ASSERT_DEBUG(TheCurrentThread->iMState==DThread::EReady,K::Fault(K::ESemSignalBadState));
    COND_BTRACE_KS(!iResetting, BTrace::ESemaphoreRelease, this);
    if (!iResetting && ++iCount<=0)
        {
        DThread* pT=iWaitQ.First();
        iWaitQ.Remove(pT);
        pT->iMState=DThread::EReady;
        pT->iWaitObj=NULL;
#if defined(_DEBUG) && !defined(__SMP__)
        // For crazy scheduler: if next thread is same priority as current, let it run
        // Check before releasing it in case it preempts us and exits
        TBool yield = EFalse;
        if (TheSuperPage().KernelConfigFlags() & EKernelConfigCrazyScheduling
            && NCurrentThread()->iPriority == pT->iNThread.iPriority)
            yield = ETrue;
#endif
        NKern::ThreadRelease(&pT->iNThread,0,SYSTEM_LOCK);
#if defined(_DEBUG) && !defined(__SMP__)
        // Actually do the yield
        if (yield)
            NKern::YieldTimeslice();
#endif
        return;
        }
    NKern::UnlockSystem();
    }

// Enter and return with system locked.
void DSemaphore::SignalN(TInt aCount)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Semaphore %O SignalN(%d) %d",this,aCount,iCount));
    __ASSERT_DEBUG(TheCurrentThread->iMState==DThread::EReady,K::Fault(K::ESemSignalBadState));
    if (iResetting)
        return;
    if (iCount<0 && aCount>0)
        {
        while(aCount--)
            {
            BTRACE_KS(BTrace::ESemaphoreRelease, this);
            if (++iCount<=0)
                {
                DThread* pT=iWaitQ.First();
                iWaitQ.Remove(pT);
                pT->iMState=DThread::EReady;
                pT->iWaitObj=NULL;
#if defined(_DEBUG) && !defined(__SMP__)
                // For crazy scheduler: if next thread is same priority as current, let it run
                // Check before releasing it in case it preempts us and exits
                TBool yield = EFalse;
                if (TheSuperPage().KernelConfigFlags() & EKernelConfigCrazyScheduling
                    && NCurrentThread()->iPriority == pT->iNThread.iPriority)
                    yield = ETrue;
#endif
                NKern::ThreadRelease(&pT->iNThread,0,SYSTEM_LOCK);
#if defined(_DEBUG) && !defined(__SMP__)
                // Actually do the yield
                if (yield)
                    NKern::YieldTimeslice();
#endif
                NKern::LockSystem();
                if (iResetting)
                    return;
                }
            else
                {
                iCount+=aCount;
                break;
                }
            }
        }
    else if (aCount>0)
        iCount+=aCount;
    }
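// SignalN(aCount) behaves like aCount individual Signal() calls, except that
// it drops and re-takes the system lock between releases (via the
// NKern::LockSystem() above) and stops early if a concurrent Reset() is seen.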
|
// Enter and return with system locked.
void DSemaphore::Reset()
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Semaphore %O Reset %d",this,iCount));
    if (iResetting)
        return;
    iResetting = TRUE;

    // We release the waiting threads before the suspended threads.
    // Other code relies on this.
    while(iCount<0)
        {
        iCount++;
        DThread* pT=iWaitQ.First();
        iWaitQ.Remove(pT);
        pT->iMState=DThread::EReady;
        pT->iWaitObj=NULL;
        NKern::ThreadRelease(&pT->iNThread,KErrGeneral,SYSTEM_LOCK);
        NKern::LockSystem();
        }
    while (!iSuspendedQ.IsEmpty())
        {
        DThread* pT=_LOFF(iSuspendedQ.First()->Deque(),DThread,iWaitLink);
        pT->iMState=DThread::EReady;
        pT->iWaitObj=NULL;
        NKern::ThreadRelease(&pT->iNThread,KErrGeneral,SYSTEM_LOCK);
        NKern::LockSystem();
        }
    iResetting = FALSE;
    iCount=0;
    }

// Enter and return with system locked.
void DSemaphore::WaitCancel(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Semaphore %O WaitCancel(%O) %d",this,aThread,iCount));
    iWaitQ.Remove(aThread);
    ++iCount;
    }

// Enter and return with system locked.
void DSemaphore::WaitCancelSuspended(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Semaphore %O WaitCanSusp(%O) %d",this,aThread,iCount));
    aThread->iWaitLink.Deque();
    }

// Enter and return with system locked.
void DSemaphore::SuspendWaitingThread(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Semaphore %O SuspWait(%O) %d",this,aThread,iCount));
    ++iCount;
    iWaitQ.Remove(aThread);
    iSuspendedQ.Add(&aThread->iWaitLink); // OK if resetting since suspended queue is processed after wait queue
    aThread->iMState=DThread::EWaitSemaphoreSuspended;
    }

// Enter and return with system locked.
void DSemaphore::ResumeWaitingThread(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Semaphore %O ResumeWait(%O) %d",this,aThread,iCount));
    aThread->iWaitLink.Deque();
    if (!iResetting && --iCount<0)
        {
        iWaitQ.Add(aThread);
        aThread->iMState=DThread::EWaitSemaphore;
        }
    else
        {
        aThread->iMState=DThread::EReady;
        aThread->iWaitObj=NULL;
        NKern::ThreadRelease(&aThread->iNThread,iResetting?KErrGeneral:KErrNone);
        }
    }

// Enter and return with system locked.
void DSemaphore::ChangeWaitingThreadPriority(DThread* aThread, TInt aNewPriority)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Semaphore %O ChangeWaitPri(%O,%d)",this,aThread,aNewPriority));
    iWaitQ.ChangePriority(aThread,aNewPriority);
    }
|
/********************************************
 * Mutex
 ********************************************/
inline TThreadMutexCleanup::TThreadMutexCleanup(DMutex* aMutex)
    : iMutex(aMutex)
    {}
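// Priority inheritance: while a thread holds a DMutex, the TThreadMutexCleanup
// item sits on the holder's cleanup queue with iPriority equal to the highest
// priority of any waiting thread (see the iCleanup.ChangePriority() calls
// below), raising the holder's effective priority. Cleanup() runs if the
// holder exits while still holding the mutex: it either detaches the cleanup
// item (mutex already resetting) or force-releases the mutex to the next waiter.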
|
// Enter and return with system locked.
void TThreadMutexCleanup::Cleanup()
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("TThreadCleanup::Cleanup Free mutex %O",iMutex));
    if (iMutex->iResetting)
        {
        iMutex->iCleanup.Remove();
        iMutex->iCleanup.iThread=NULL;
#ifdef _DEBUG
        iMutex->iOrderLink.Deque();
#endif
        }
    else
        {
        iMutex->iHoldCount=1;
        iMutex->Signal();
        NKern::LockSystem();
        }
    }


DMutex::DMutex()
    : iCleanup(this)
    {}

// Enter and return with system unlocked.
DMutex::~DMutex()
    {
    NKern::LockSystem();
    Reset();
    BTRACE_KS(BTrace::EMutexDestroy, this);
    NKern::UnlockSystem();
    }

// Enter and return with system unlocked.
TInt DMutex::Create(DObject* aOwner, const TDesC* aName, TBool aVisible, TUint aOrder)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("DMutex::Create owner %O, name %lS, visible=%d, order=%02x",aOwner,aName,aVisible,aOrder));
    iOrder = (TUint8)aOrder;
    SetOwner(aOwner);
    TInt r=KErrNone;
    if (aName && aName->Length())
        {
        r=SetName(aName);
        if (r!=KErrNone)
            return r;
        }
    r = iWaitQ.Construct();
    if (r==KErrNone && aVisible)
        r=K::AddObject(this,EMutex);
    COND_BTRACE_KSC(r==KErrNone, BTrace::EMutexCreate);
    return r;
    }

#ifdef _DEBUG
extern const SNThreadHandlers EpocThreadHandlers;
#endif

// Enter and return with system locked.
TInt DMutex::Wait()
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O Wait hold %O hldc=%d wtc=%d",this,iCleanup.iThread,iHoldCount,iWaitCount));
    __ASSERT_SYSTEM_LOCK;
    __ASSERT_DEBUG(NCurrentThread()->iHandlers==&EpocThreadHandlers, K::Fault(K::EMutexWaitNotDThread));
    DThread* pC=TheCurrentThread;
    __ASSERT_DEBUG(pC->iMState==DThread::EReady,K::Fault(K::EMutexWaitBadState));
    __ASSERT_DEBUG(!pC->iWaitObj,K::Fault(K::EMutexWaitBadWaitObj));
#ifdef _DEBUG
    SDblQue& ml = pC->iMutexList;
    DMutex* m = ml.IsEmpty() ? NULL : _LOFF(ml.First(), DMutex, iOrderLink);
    TUint last_mutex_order = m ? m->iOrder : KMutexOrdNone;
    if (iCleanup.iThread!=pC && iOrder<KMutexOrdUser && iOrder>=last_mutex_order)
        {
        __KTRACE_OPT(KPANIC,Kern::Printf("Mutex ordering violation: holding mutex %O (%08x) order %d, trying to acquire mutex %O (%08x) order %d",m,m,last_mutex_order,this,this,iOrder));
        K::Fault(K::EMutexOrderingViolation);
        }
#endif
    while(!iResetting && iCleanup.iThread)
        {
        if (iCleanup.iThread==pC)
            {
            ++iHoldCount;
            BTRACE_KS(BTrace::EMutexAcquire, this);
            return KErrNone;
            }
        K::PINestLevel=0;
        pC->iMState=DThread::EWaitMutex;
        pC->iWaitObj=this;
        ++iWaitCount;
        iWaitQ.Add(pC);
        TInt p=pC->iWaitLink.iPriority;
        if (p>iCleanup.iPriority)
            iCleanup.ChangePriority(p);
        BTRACE_KS(BTrace::EMutexBlock, this);

        // If the thread is woken up normally as a result of the mutex being released
        // this function will return KErrNone and this thread will have been placed in
        // EHoldMutexPending state. If the thread is killed while waiting this function
        // will not return (since the exit handler will run instead).
        // If the mutex is reset (or deleted) while the thread is waiting this function
        // will return KErrGeneral and the thread will have been placed into EReady
        // state. If however the mutex is reset (or deleted) while the thread is in
        // EHoldMutexPending state, having already been woken up normally as a result
        // of the mutex being released, this function will return KErrNone (since the
        // return value is set at the point where the thread is released from its wait
        // condition). However we can still detect this situation since the thread will
        // have been placed into the EReady state when the mutex was reset.
        TInt r=NKern::Block(0,NKern::ERelease|NKern::EClaim,SYSTEM_LOCK);
        if (r==KErrNone && pC->iMState==DThread::EReady)
            r = KErrGeneral;  // mutex has been reset
        if (r!=KErrNone)  // if we get an error here...
            {
            __ASSERT_DEBUG(pC->iMState==DThread::EReady,K::Fault(K::EMutexWaitBadState));
            return r;  // ...bail out now - this mutex may no longer exist
            }
        pC->iMState=DThread::EReady;
        pC->iWaitObj=NULL;
        pC->iWaitLink.Deque();  // remove thread from iPendingQ
        }
    if (iResetting)
        return KErrGeneral;
    BTRACE_KS(BTrace::EMutexAcquire, this);
    iHoldCount=1;
    iCleanup.iPriority=TUint8(HighestWaitingPriority());
    pC->AddCleanup(&iCleanup);
#ifdef _DEBUG
    ml.AddHead(&iOrderLink);
#endif
    return KErrNone;
    }
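// Note that DMutex is recursive for the holding thread: a repeated Wait() by
// the holder just increments iHoldCount (see the top of the loop above), and
// Signal() only releases the mutex once iHoldCount returns to zero.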
|
// Enter with system locked, return with system unlocked.
void DMutex::Signal()
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O Signal hold=%O hldc=%d wtc=%d",this,iCleanup.iThread,iHoldCount,iWaitCount));
    __ASSERT_SYSTEM_LOCK;
    DThread* pC=TheCurrentThread;
    __ASSERT_DEBUG(iCleanup.iThread==pC,K::Fault(K::EMutexSignalWrongThread));
    __ASSERT_DEBUG(pC->iMState==DThread::EReady,K::Fault(K::EMutexSignalBadState));
    COND_BTRACE_KS(!iResetting, BTrace::EMutexRelease, this);
    if (!iResetting && --iHoldCount==0)
        {
        if (iWaitQ.NonEmpty())
            {
            // Wake up the next waiting thread.
            // We MUST do this before relinquishing our inherited priority.
            // We won't thrash on the system lock because inheritance ensures our priority is not
            // lower than the waiting thread's and the scheduler will not round-robin a thread which
            // holds a fast mutex (the system lock in this case).
            WakeUpNextThread();
            }
        else
            {
            __ASSERT_DEBUG(iCleanup.iPriority==0,Kern::Fault("MutSigBadClnPri",iCleanup.iPriority));
            }
        iCleanup.iThread=NULL;
#ifdef _DEBUG
        iOrderLink.Deque();
#endif
        pC->iCleanupQ.Remove(&iCleanup);  // remove cleanup item but don't set priority yet
        if (iCleanup.iPriority!=0)  // if cleanup item had nonzero priority may need to revert our priority
            {
            // Revert this thread's priority and release the system lock without thrashing.
            // Relies on the fact that our MState is READY here.
            pC->RevertPriority();
            return;
            }
        }
    NKern::UnlockSystem();
    }

// Enter and return with system locked.
void DMutex::Reset()
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O Reset hold=%O hldc=%d wtc=%d",this,iCleanup.iThread,iHoldCount,iWaitCount));
    __ASSERT_SYSTEM_LOCK;
    if (iResetting)
        return;
    K::PINestLevel=0;
    iResetting = TRUE;

    // We release the pending threads first, then waiting threads, then suspended threads.
    // Other code relies on this.
    while (!iPendingQ.IsEmpty())
        {
        DThread* pT=_LOFF(iPendingQ.First()->Deque(),DThread,iWaitLink);
        pT->iMState=DThread::EReady;
        pT->iWaitObj=NULL;
        NKern::FlashSystem();
        }
    while (iWaitQ.NonEmpty())
        {
        DThread* pT=iWaitQ.First();
        iWaitQ.Remove(pT);
        pT->iMState=DThread::EReady;
        pT->iWaitObj=NULL;
        NKern::ThreadRelease(&pT->iNThread,KErrGeneral,SYSTEM_LOCK);
        NKern::LockSystem();
        }
    while (!iSuspendedQ.IsEmpty())
        {
        DThread* pT=_LOFF(iSuspendedQ.First()->Deque(),DThread,iWaitLink);
        pT->iMState=DThread::EReady;
        pT->iWaitObj=NULL;
        NKern::ThreadRelease(&pT->iNThread,KErrGeneral,SYSTEM_LOCK);
        NKern::LockSystem();
        }
    if (iCleanup.iThread)
        {
        iCleanup.Remove();
        iCleanup.iThread=NULL;
#ifdef _DEBUG
        iOrderLink.Deque();
#endif
        }
    iCleanup.iPriority=0;
    iHoldCount=0;
    iWaitCount=0;
    iResetting = FALSE;
    }

// Enter and return with system locked.
void DMutex::WaitCancel(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O WaitCancel(%O)",this,aThread));
    iWaitQ.Remove(aThread);
    --iWaitCount;
    K::PINestLevel=0;
    TInt p=HighestWaitingPriority();
    iCleanup.ChangePriority(p);
    }

// Enter and return with system locked.
void DMutex::WaitCancelSuspended(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O WaitCanSusp(%O)",this,aThread));
    aThread->iWaitLink.Deque();
    --iWaitCount;
    }

// Enter and return with system locked.
void DMutex::SuspendWaitingThread(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O SuspWait(%O)",this,aThread));
    iWaitQ.Remove(aThread);
    iSuspendedQ.Add(&aThread->iWaitLink); // OK if resetting since suspended queue is processed after wait queue
    aThread->iMState=DThread::EWaitMutexSuspended;
    K::PINestLevel=0;
    TInt p=HighestWaitingPriority();
    iCleanup.ChangePriority(p);
    }

// Enter and return with system locked.
void DMutex::ResumeWaitingThread(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O ResumeWait(%O)",this,aThread));
    aThread->iWaitLink.Deque();
    if (!iResetting)
        {
        if (iCleanup.iThread)
            {
            // mutex is held, so put this one back on wait queue
            aThread->iMState=DThread::EWaitMutex;
            iWaitQ.Add(aThread);
            K::PINestLevel=0;
            TInt p=aThread->iWaitLink.iPriority;
            if (p>iCleanup.iPriority)
                iCleanup.ChangePriority(p);
            return;
            }
        aThread->iMState=DThread::EHoldMutexPending;
        iPendingQ.Add(&aThread->iWaitLink);
        --iWaitCount;
        NKern::ThreadRelease(&aThread->iNThread,0);
        }
    else
        {
        // don't want to put it on the wait queue
        aThread->iMState=DThread::EReady;
        aThread->iWaitObj=NULL;
        NKern::ThreadRelease(&aThread->iNThread,KErrGeneral);
        }
    }

// Enter and return with system locked.
void DMutex::ChangeWaitingThreadPriority(DThread* aThread, TInt aNewPriority)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O ChangeWaitPri(%O,%d)",this,aThread,aNewPriority));
    if (!iCleanup.iThread && aNewPriority>aThread->iWaitLink.iPriority && !iResetting)
        {
        // if the mutex is currently free and the thread's priority is being increased, wake up the thread
        __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O wake up %O wtc=%d",this,aThread,iWaitCount));
        iWaitQ.Remove(aThread);
        aThread->iWaitLink.iPriority=(TUint8)aNewPriority;
        iPendingQ.Add(&aThread->iWaitLink);
        aThread->iMState=DThread::EHoldMutexPending;
        --iWaitCount;
        NKern::ThreadRelease(&aThread->iNThread,0); // unfortunately this may well thrash but this should be a rare case
        }
    else
        {
        iWaitQ.ChangePriority(aThread,aNewPriority);
        iCleanup.ChangePriority(iWaitQ.HighestPriority());
        }
    }

// Enter and return with system locked.
void DMutex::ChangePendingThreadPriority(DThread* aThread, TInt aNewPriority)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O ChangePendPri(%O,%d)",this,aThread,aNewPriority));
    if (!iCleanup.iThread && aNewPriority<aThread->iWaitLink.iPriority && !iResetting)
        {
        if (aNewPriority<HighestWaitingPriority())
            {
            // wake up the next thread
            WakeUpNextThread();
            }
        }
    aThread->iWaitLink.iPriority=(TUint8)aNewPriority;
    }

// Enter and return with system locked.
void DMutex::WakeUpNextThread()
    {
    // wake up the next thread
    DThread* pT=iWaitQ.First();
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O wake up %O wtc=%d",this,pT,iWaitCount));
    iWaitQ.Remove(pT);
    iPendingQ.Add(&pT->iWaitLink);
    pT->iMState=DThread::EHoldMutexPending;
    --iWaitCount;
    NKern::ThreadRelease(&pT->iNThread,0);
    // If next thread is same priority as current, let it have a go with the mutex
    // Safe to inspect pT here as it can't have run yet, we've still got the system lock
    if (NCurrentThread()->iPriority == pT->iNThread.iPriority)
        NKern::YieldTimeslice();
    }
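// A woken waiter is parked on iPendingQ in the EHoldMutexPending state rather
// than being handed the mutex directly; it claims the mutex itself when it
// resumes inside Wait(). If a pending thread is suspended or exits before it
// can do so, SuspendPendingThread()/RemovePendingThread() below wake the next
// waiter so the mutex is not left unclaimed.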
|
// Called when a thread which was about to claim the mutex is suspended
// Enter and return with system locked.
#ifdef KSEMAPHORE
void DMutex::SuspendPendingThread(DThread* aThread)
#else
void DMutex::SuspendPendingThread(DThread*)
#endif
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O SuspendP(%O)",this,aThread));
    if (!iResetting && !iCleanup.iThread && iWaitQ.NonEmpty())
        WakeUpNextThread();
    }

// Called when a pending thread exits
// Enter and return with system locked.
void DMutex::RemovePendingThread(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O RemoveP(%O)",this,aThread));
    aThread->iWaitLink.Deque();
    if (!iResetting && !iCleanup.iThread && iWaitQ.NonEmpty())
        WakeUpNextThread();
    }

TInt DMutex::HighestWaitingPriority()
    {
    if (iWaitQ.NonEmpty())
        return iWaitQ.HighestPriority();
    return 0;
    }

/********************************************
 * Condition Variable
 ********************************************/
DCondVar::DCondVar()
    {
    }

DCondVar::~DCondVar()
    {
    NKern::LockSystem();
    Reset();
    BTRACE_KS(BTrace::ECondVarDestroy, this);
    NKern::UnlockSystem();
    }

// Enter and return with system unlocked.
TInt DCondVar::Create(DObject* aOwner, const TDesC* aName, TBool aVisible)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("DCondVar::Create owner %O, name %lS, visible=%d",aOwner,aName,aVisible));
    SetOwner(aOwner);
    TInt r=KErrNone;
    if (aName && aName->Length())
        {
        r=SetName(aName);
        if (r!=KErrNone)
            return r;
        }
    r = iWaitQ.Construct();
    if (r==KErrNone && aVisible)
        r=K::AddObject(this,ECondVar);
    COND_BTRACE_KSC(r==KErrNone, BTrace::ECondVarCreate);
    return r;
    }

void DCondVar::WaitCancel(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("CondVar %O WaitCancel(%O)", this, aThread));
    iWaitQ.Remove(aThread);
    if (--iWaitCount == 0)
        iMutex = NULL;
    }

void DCondVar::WaitCancelSuspended(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("CondVar %O WaitCanSusp(%O)", this, aThread));
    aThread->iWaitLink.Deque();
    if (--iWaitCount == 0)
        iMutex = NULL;
    }

void DCondVar::SuspendWaitingThread(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("CondVar %O SuspendWait(%O)", this, aThread));
    iWaitQ.Remove(aThread);
    iSuspendedQ.Add(&aThread->iWaitLink); // OK if resetting since suspended queue is processed after wait queue
    aThread->iMState = DThread::EWaitCondVarSuspended;
    }

void DCondVar::ResumeWaitingThread(DThread* aThread)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("CondVar %O ResumeWait(%O)", this, aThread));
    aThread->iWaitLink.Deque();
    aThread->iMState = DThread::EWaitCondVar;
    UnBlockThread(aThread, EFalse);
    }

void DCondVar::ChangeWaitingThreadPriority(DThread* aThread, TInt aNewPriority)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("CondVar %O ChangeWaitPri(%O,%d)", this, aThread, aNewPriority));
    if (aNewPriority>aThread->iWaitLink.iPriority && !iResetting)
        {
        __KTRACE_OPT(KSEMAPHORE,Kern::Printf("CV %O wake up %O", this, aThread));
        iWaitQ.Remove(aThread);
        aThread->iWaitLink.iPriority = (TUint8)aNewPriority;
        UnBlockThread(aThread, EFalse);
        }
    else
        {
        iWaitQ.ChangePriority(aThread, aNewPriority);
        }
    }

TInt DCondVar::Wait(DMutex* aMutex, TInt aTimeout)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("CondVar %O Wait (M=%O, tmout=%d)", this, aMutex, aTimeout));
    __ASSERT_SYSTEM_LOCK;
    DMutex& m = *aMutex;
    DThread* pC=TheCurrentThread;
    __ASSERT_DEBUG(pC->iMState==DThread::EReady, K::Fault(K::ECondVarWaitBadState1));
    if (iResetting)
        return KErrGeneral;
    if (aMutex->iCleanup.iThread != pC)
        K::PanicCurrentThread(ECondVarWaitMutexNotLocked);
    if (iMutex && iMutex!=aMutex)
        return KErrInUse;
    if (!iMutex)
        iMutex = aMutex;

    // set the current thread M-State to wait-for-condition-variable
    pC->iMState = DThread::EWaitCondVar;
    pC->iWaitObj = this;
    iWaitQ.Add(pC);
    ++iWaitCount;

    // unlock the associated mutex
    TBool unlock = ETrue;
    m.iHoldCount = 0;
    if (m.iWaitQ.NonEmpty())
        {
        // Wake up the next waiting thread.
        // We MUST do this before relinquishing our inherited priority.
        // We won't thrash on the system lock because inheritance ensures our priority is not
        // lower than the waiting thread's and the scheduler will not round-robin a thread which
        // holds a fast mutex (the system lock in this case).
        m.WakeUpNextThread();
        }
    else
        {
        __ASSERT_DEBUG(m.iCleanup.iPriority==0,Kern::Fault("MutSigBadClnPri",m.iCleanup.iPriority));
        }
    m.iCleanup.iThread=NULL;
#ifdef _DEBUG
    m.iOrderLink.Deque();
#endif
    pC->iCleanupQ.Remove(&m.iCleanup);  // remove cleanup item but don't set priority yet
    if (m.iCleanup.iPriority!=0)  // if cleanup item had nonzero priority may need to revert our priority
        {
        // Revert this thread's priority and release the system lock without thrashing.
        TInt p = pC->iDefaultPriority;
        TInt c = pC->iCleanupQ.HighestPriority();
        __KTRACE_OPT(KTHREAD,Kern::Printf("Thread %O CVRevertPriority def %d cleanup %d", pC, pC->iDefaultPriority, c));
        if (c>p)
            p=c;
        if (p != pC->iNThread.i_NThread_BasePri)
            {
            iWaitQ.ChangePriority(pC, p);
            NKern::ThreadSetPriority(&pC->iNThread, p, SYSTEM_LOCK); // anti-thrash
            unlock = EFalse;
            }
        }
    if (unlock)
        NKern::UnlockSystem();

    // reacquire the system lock and check if we need to block
    NKern::LockSystem();
    switch (pC->iMState)
        {
    case DThread::EReady:               // condition variable deleted
        return KErrGeneral;
    case DThread::EHoldMutexPending:    // condition variable signalled, mutex free
    case DThread::EWaitMutex:           // condition variable signalled, now waiting for mutex
    case DThread::EWaitCondVar:         // still waiting
        break;
    case DThread::ECreated:
    case DThread::EDead:
    case DThread::EWaitSemaphore:
    case DThread::EWaitSemaphoreSuspended:
    case DThread::EWaitMutexSuspended:
    case DThread::EWaitCondVarSuspended:
    default:
        K::Fault(K::ECondVarWaitBadState2);
        }

    // block if necessary then reacquire the mutex
    TInt r = KErrNone;
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Mutex %O Wait hold %O hldc=%d wtc=%d", &m, m.iCleanup.iThread, m.iHoldCount, m.iWaitCount));
#ifdef _DEBUG
    SDblQue& ml = pC->iMutexList;
    DMutex* mm = ml.IsEmpty() ? NULL : _LOFF(ml.First(), DMutex, iOrderLink);
    TUint last_mutex_order = mm ? mm->iOrder : KMutexOrdNone;
    if (m.iOrder<KMutexOrdUser && m.iOrder>=last_mutex_order)
        K::Fault(K::EMutexOrderingViolation);
#endif
    while(m.iCleanup.iThread || pC->iMState==DThread::EWaitMutex || pC->iMState==DThread::EWaitCondVar) // mutex can't be resetting since we have a handle on it
        {
        if (pC->iMState == DThread::EHoldMutexPending)
            pC->iWaitLink.Deque();
        if (pC->iMState!=DThread::EWaitCondVar && pC->iMState!=DThread::EWaitMutex)
            {
            K::PINestLevel = 0;
            pC->iMState = DThread::EWaitMutex;
            pC->iWaitObj = &m;
            ++m.iWaitCount;
            m.iWaitQ.Add(pC);
            TInt p = pC->iWaitLink.iPriority;
            if (p>m.iCleanup.iPriority)
                m.iCleanup.ChangePriority(p);
            }
        TInt tmout = pC->iMState==DThread::EWaitCondVar ? aTimeout : 0;
        BTRACE_KS2(BTrace::ECondVarBlock, this, &m);

        // The following possibilities exist here:
        // 1. Normal operation: condition variable released and mutex is unlocked
        //    s=KErrNone, thread state EHoldMutexPending
        // 2. Timeout while waiting for condition variable
        //    s=KErrTimedOut, thread state EReady
        // 3. Condition variable reset while thread waiting for it
        //    s=KErrGeneral, thread state EReady
        // 4. Mutex reset while thread waiting for it (after condition variable signalled)
        //    s=KErrGeneral, thread state EReady
        // 5. Mutex reset while thread is in EHoldMutexPending state (after condition
        //    variable signalled and mutex unlocked)
        //    s=KErrNone, thread state EReady
        // 6. Thread killed while waiting for mutex or condition variable
        //    Function doesn't return since exit handler runs instead.
        TInt s = NKern::Block(tmout, NKern::ERelease|NKern::EClaim, SYSTEM_LOCK);
        if (s==KErrNone && pC->iMState==DThread::EReady)
            s = KErrGeneral;
        if (s!=KErrNone && s!=KErrTimedOut) // if we get an error here...
            {
            __ASSERT_DEBUG(pC->iMState==DThread::EReady,K::Fault(K::EMutexWaitBadState));
            return s;   // ...bail out now - this condition variable may no longer exist
            }
        if (s==KErrTimedOut)
            r = s;
        BTRACE_KS2(BTrace::ECondVarWakeUp, this, &m);
        }
    __ASSERT_DEBUG(pC->iMState==DThread::EReady || pC->iMState==DThread::EHoldMutexPending,
                   K::Fault(K::ECondVarWaitBadState3));
    if (pC->iMState == DThread::EHoldMutexPending)
        pC->iWaitLink.Deque();  // remove thread from iPendingQ
    pC->iMState = DThread::EReady;
    pC->iWaitObj = NULL;
    m.iHoldCount = 1;
    m.iCleanup.iPriority = TUint8(m.HighestWaitingPriority());
    pC->AddCleanup(&m.iCleanup);
#ifdef _DEBUG
    ml.AddHead(&m.iOrderLink);
#endif
    return r;
    }
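// DCondVar::Wait() protocol: the caller must hold aMutex (else the current
// thread is panicked); the thread is queued on the condition variable and the
// mutex released under the system lock, and on every wake-up path the mutex is
// re-acquired before returning. A condition variable is associated with at
// most one mutex at a time - waiting with a different mutex while waiters
// exist fails with KErrInUse.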
|
void DCondVar::UnBlockThread(DThread* t, TBool aUnlock)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("CondVar %O UnBlockThread %O M:%d U:%d", this, t, t->iMState, aUnlock));
    if (iResetting)
        {
        t->iWaitObj = NULL;
        t->iMState = DThread::EReady;
#if defined(_DEBUG) && !defined(__SMP__)
        // For crazy scheduler: if next thread is same priority as current, let it run
        // Check before releasing it in case it preempts us and exits
        TBool yield = EFalse;
        if (TheSuperPage().KernelConfigFlags() & EKernelConfigCrazyScheduling
            && NCurrentThread()->iPriority == t->iNThread.iPriority)
            yield = ETrue;
#endif
        if (aUnlock)
            NKern::ThreadRelease(&t->iNThread, KErrGeneral, SYSTEM_LOCK);
        else
            NKern::ThreadRelease(&t->iNThread, KErrGeneral);
#if defined(_DEBUG) && !defined(__SMP__)
        // Actually do the yield
        if (yield)
            NKern::YieldTimeslice();
#endif
        return;
        }
    t->iWaitObj = iMutex;
    if (t->iMState == DThread::EWaitCondVar)
        {
        if (iMutex->iCleanup.iThread)
            {
            __KTRACE_OPT(KSEMAPHORE,Kern::Printf("WaitThread %O -> EWaitMutex", t));
            t->iMState = DThread::EWaitMutex;
            iMutex->iWaitQ.Add(t);
            ++iMutex->iWaitCount;
            K::PINestLevel = 0;
            TInt p = t->iWaitLink.iPriority;
            if (p > iMutex->iCleanup.iPriority)
                iMutex->iCleanup.ChangePriority(p);
            }
        else
            {
            __KTRACE_OPT(KSEMAPHORE,Kern::Printf("WaitThread %O -> EHoldMutexPending", t));
            t->iMState = DThread::EHoldMutexPending;
            iMutex->iPendingQ.Add(&t->iWaitLink);
            if (aUnlock)
                NKern::ThreadRelease(&t->iNThread, 0, SYSTEM_LOCK);
            else
                NKern::ThreadRelease(&t->iNThread, 0);
            aUnlock = EFalse;
            }
        }
    else if (t->iMState == DThread::EWaitCondVarSuspended)
        {
        __KTRACE_OPT(KSEMAPHORE,Kern::Printf("WaitThread %O -> EWaitMutexSusp", t));
        t->iMState = DThread::EWaitMutexSuspended;
        iMutex->iSuspendedQ.Add(&t->iWaitLink);
        ++iMutex->iWaitCount;
        }
    else
        K::Fault(K::ECondVarUnBlockBadState);
    if (--iWaitCount == 0)
        iMutex = NULL;
    if (aUnlock)
        NKern::UnlockSystem();
    }

void DCondVar::Signal()
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("CondVar %O Signal", this));
    __ASSERT_SYSTEM_LOCK;
    BTRACE_KS2(BTrace::ECondVarSignal, this, iMutex);
    DThread* t = NULL;
    if (iWaitQ.NonEmpty())
        {
        t = iWaitQ.First();
        iWaitQ.Remove(t);
        }
    else if (!iSuspendedQ.IsEmpty())
        {
        t = _LOFF(iSuspendedQ.First()->Deque(), DThread, iWaitLink);
        }
    if (t)
        UnBlockThread(t, ETrue);
    else
        NKern::UnlockSystem();
    }

// On entry the specified mutex is held by the current thread
// Enter and return with system locked
void DCondVar::Broadcast(DMutex* m)
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("CondVar %O Broadcast", this));
    __ASSERT_SYSTEM_LOCK;
    BTRACE_KS2(BTrace::ECondVarBroadcast, this, m);
    while (iMutex == m)
        {
        Signal();
        NKern::LockSystem();
        }
    }
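// Broadcast() simply signals until no waiter associated with m remains
// (UnBlockThread() clears iMutex once the last waiter has been moved over),
// re-taking the system lock after each Signal() since Signal() releases it.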
|
void DCondVar::Reset()
    {
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("CondVar %O Reset", this));
    __ASSERT_SYSTEM_LOCK;
    if (iResetting)
        return;
    iResetting = TRUE;

    // We release the waiting threads first, then suspended threads.
    // Other code relies on this.
    while (iWaitQ.NonEmpty())
        {
        DThread* pT=iWaitQ.First();
        iWaitQ.Remove(pT);
        pT->iMState=DThread::EReady;
        pT->iWaitObj=NULL;
        NKern::ThreadRelease(&pT->iNThread, KErrGeneral, SYSTEM_LOCK);
        NKern::LockSystem();
        }
    while (!iSuspendedQ.IsEmpty())
        {
        DThread* pT=_LOFF(iSuspendedQ.First()->Deque(),DThread,iWaitLink);
        pT->iMState=DThread::EReady;
        pT->iWaitObj=NULL;
        NKern::ThreadRelease(&pT->iNThread, KErrGeneral, SYSTEM_LOCK);
        NKern::LockSystem();
        }
    iMutex = NULL;
    iResetting = FALSE;
    }

TInt ExecHandler::CondVarWait(DCondVar* aCondVar, TInt aMutexHandle, TInt aTimeout)
    {
    if (aTimeout)
        {
        if (aTimeout<0)
            {
            NKern::UnlockSystem();
            return KErrArgument;
            }

        // Convert microseconds to NTimer ticks, rounding up
        TInt ntp = NKern::TickPeriod();
        aTimeout += ntp-1;
        aTimeout /= ntp;
        }
    DThread* t = TheCurrentThread;
    DMutex* m = (DMutex*)K::ObjectFromHandle(aMutexHandle, EMutex);
    m->CheckedOpen();
    t->iTempObj = m;
    TInt r = aCondVar->Wait(m, aTimeout);
    t->iTempObj = NULL;
    TInt c = m->Dec();
    if (c==1)
        {
        NKern::ThreadEnterCS();
        NKern::UnlockSystem();
        K::ObjDelete(m);
        NKern::ThreadLeaveCS();
        }
    else
        NKern::UnlockSystem();
    return r;
    }
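// The timeout conversion above rounds up so a non-zero timeout never becomes
// zero ticks: for example, assuming the common 1ms nanokernel tick
// (NKern::TickPeriod()==1000), a 2500us timeout becomes (2500+999)/1000 = 3 ticks.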
|
void ExecHandler::CondVarSignal(DCondVar* aCondVar)
    {
    aCondVar->Signal();
    }

void ExecHandler::CondVarBroadcast(DCondVar* aCondVar)
    {
    TBool wm = EFalse;
    DMutex* m = aCondVar->iMutex;
    if (m)  // if no mutex, no-one is waiting so no-op
        {
        aCondVar->CheckedOpen();
        m->CheckedOpen();
        NKern::ThreadEnterCS();
        if (m->iCleanup.iThread != TheCurrentThread)
            {
            wm = ETrue;
            m->Wait();
            NKern::FlashSystem();
            }
        aCondVar->Broadcast(m);
        if (wm)
            m->Signal();
        else
            NKern::UnlockSystem();
        m->Close(NULL);
        aCondVar->Close(NULL);
        NKern::ThreadLeaveCS();
        }
    else
        NKern::UnlockSystem();
    }

TInt ExecHandler::CondVarCreate(const TDesC8* aName, TOwnerType aType)
    {
    TKName n;
    DObject* pO=NULL;
    const TDesC* pN=NULL;
    if (aName)
        {
        Kern::KUDesGet(n,*aName);
        pN=&n;
        }
    else if (aType==EOwnerThread)
        pO=TheCurrentThread;
    else
        pO=TheCurrentThread->iOwningProcess;
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Exec::CondVarCreate %lS", aName));
    NKern::ThreadEnterCS();
    TInt r=KErrNoMemory;
    DCondVar* pV = new DCondVar;
    if (pV)
        {
        r = pV->Create(pO, pN, ETrue);
        if (r==KErrNone && aName)
            pV->SetProtection(n.Length()? DObject::EGlobal : DObject::EProtected);
        if (r==KErrNone)
            r = K::MakeHandle(aType, pV);
        if (r<KErrNone)
            pV->Close(NULL);
        }
    NKern::ThreadLeaveCS();
    __KTRACE_OPT(KSEMAPHORE,Kern::Printf("Exec::CondVarCreate returns %d",r));
    return r;
    }

/********************************************
 * Chunk
 ********************************************/
DChunk::DChunk()
    {
    }

DChunk::~DChunk()
    {
    __COND_DEBUG_EVENT(iAttributes&EConstructed, EEventDeleteChunk, this);
    }


void DChunk::SetPaging(TUint /*aCreateAtt*/)
    {
    // Default implementation of virtual method that does nothing.
    }

TInt DChunk::Create(SChunkCreateInfo& aInfo)
    {
    SetOwner(aInfo.iOwner);
    TInt r=KErrNone;
    TBool named = (aInfo.iName.Ptr() && aInfo.iName.Length());
    if (named)
        {
        r=SetName(&aInfo.iName);
        if (r!=KErrNone)
            return r;
        }

    switch(aInfo.iType)
        {
    case ESharedKernelMultiple:
        SetProtection(DObject::EProtected);
        // fall through...
    case ESharedIo:
    case ESharedKernelSingle:
        // Shared kernel chunks can't be adjusted from user side
        iControllingOwner = K::TheKernelProcess->iId;
        iRestrictions = EChunkPreventAdjust;
        break;

    default:
        if(aInfo.iGlobal)
            SetProtection(named ? DObject::EGlobal : DObject::EProtected);
        iControllingOwner=TheCurrentThread->iOwningProcess->iId;
        }
    // Check if chunk is to own its memory
    if (aInfo.iAtt & TChunkCreate::EMemoryNotOwned)
        iAttributes |= EMemoryNotOwned;

    // Verify and save the mapping attributes.
    __ASSERT_COMPILE(DChunk::ENormal == 0);
    switch( aInfo.iAtt & TChunkCreate::EMappingMask)
        {
    case TChunkCreate::ENormal:
        break;
    case TChunkCreate::EDoubleEnded:
        iAttributes |= EDoubleEnded;
        break;
    case TChunkCreate::EDisconnected:
        iAttributes |= EDisconnected;
        break;
    case TChunkCreate::ECache:
        // Use TCB check to help keep cache chunks internal...
        if(!Kern::CurrentThreadHasCapability(ECapabilityTCB,__PLATSEC_DIAGNOSTIC_STRING("DChunk::Create")))
            return KErrPermissionDenied;
        iAttributes |= EDisconnected|ECache;
        break;
    default:
        return KErrArgument;
        }

    // Check if chunk is read-only
    if (aInfo.iAtt & TChunkCreate::EReadOnly)
        {
        iAttributes |= EReadOnly;
        iRestrictions |= EChunkPreventAdjust;
        }

    // Save the clear byte.
    iClearByte = aInfo.iClearByte;

    // Determine the data paging attributes.
    SetPaging(aInfo.iAtt);

    r=DoCreate(aInfo);
    if (r!=KErrNone)
        return r;

    r=K::AddObject(this,EChunk);
    if (r==KErrNone)
        {
        iAttributes|=EConstructed;
        __DEBUG_EVENT(EEventNewChunk, this);
        }
    return r;
    }

TInt DChunk::AddToProcess(DProcess* aProcess)
    {
    __KTRACE_OPT(KEXEC,Kern::Printf("Adding chunk %O to process %O",this,aProcess));
    TBool readOnly = (iAttributes & EReadOnly) && (aProcess->iId != iControllingOwner);
    TInt r = aProcess->AddChunk(this, readOnly);
    if (r == KErrAccessDenied)
        {
        __KTRACE_OPT(KEXEC,Kern::Printf("Chunk is private - will not be mapped in to process"));
        r = KErrNone;
        }
    return r;
    }


void DChunk::BTracePrime(TInt aCategory)
    {
#ifdef BTRACE_CHUNKS
    if (aCategory == BTrace::EChunks || aCategory == -1)
        {
        TKName nameBuf;
        Name(nameBuf);
        BTraceN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
        if(iOwningProcess)
            BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
        BTrace12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
        }
#endif
    }

SChunkCreateInfo::SChunkCreateInfo()
    {
    memset(this,0,sizeof(*this));
    iClearByte = KChunkClearByteDefault;
    }

/**
Create a chunk which can be shared between kernel and user code.

Once created, the chunk owns a region of linear address space of the size requested.
This region is empty (uncommitted) so before it can be used either RAM or I/O
devices must be mapped into it. This is achieved with the Commit functions:

- Kern::ChunkCommit()
- Kern::ChunkCommitContiguous()
- Kern::ChunkCommitPhysical()


@param aInfo      A structure containing the required attributes of the chunk.
                  See TChunkCreateInfo.

@param aChunk     On return, this is set to point to the created chunk object.
                  This pointer is required as an argument for other functions
                  dealing with Shared Chunks.

@param aKernAddr  On return, this is set to the linear address in the kernel
                  process where the chunk's memory starts. This address should
                  only be used when executing kernel code; user code must not
                  use this address.

@param aMapAttr   On return, this is set to the mmu mapping attributes used for
                  the chunk. This is a value constructed from the bit masks in
                  the enumeration TMappingAttributes.
                  The typical use for this value is to use it as an argument to
                  the Cache::SyncMemoryBeforeDmaWrite() and
                  Cache::SyncMemoryBeforeDmaRead() methods.

@return KErrNone, if successful; otherwise one of the other system wide error codes.

@pre Calling thread must be in a critical section.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre interrupts enabled

@see TChunkCreateInfo
*/
EXPORT_C TInt Kern::ChunkCreate(const TChunkCreateInfo& aInfo, DChunk*& aChunk, TLinAddr& aKernAddr, TUint32& aMapAttr)
    {
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::ChunkCreate");
    __KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkCreate type %d, maxSize %08x, mapAttr %08x", aInfo.iType, aInfo.iMaxSize, aInfo.iMapAttr));
    SChunkCreateInfo c;
    switch(aInfo.iType)
        {
        // Assert that chunk type enums are consistent between those used by TChunkCreateInfo and SChunkCreateInfo
        __ASSERT_COMPILE((TInt)TChunkCreateInfo::ESharedKernelSingle == (TInt)::ESharedKernelSingle);
        __ASSERT_COMPILE((TInt)TChunkCreateInfo::ESharedKernelMultiple == (TInt)::ESharedKernelMultiple);

    case TChunkCreateInfo::ESharedKernelSingle:
    case TChunkCreateInfo::ESharedKernelMultiple:
        c.iType = (TChunkType)aInfo.iType;
        c.iAtt = TChunkCreate::EDisconnected | (aInfo.iOwnsMemory? 0 : TChunkCreate::EMemoryNotOwned);
        c.iGlobal = ETrue;
        c.iForceFixed = ETrue;
        c.iMaxSize = aInfo.iMaxSize;
        c.iMapAttr = aInfo.iMapAttr;
        c.iOperations = SChunkCreateInfo::EAdjust; // To allocate virtual address space
        c.iDestroyedDfc = aInfo.iDestroyedDfc;
        break;

    default:
        return KErrArgument;
        }

    TInt r = K::TheKernelProcess->NewChunk(aChunk, c, aKernAddr);
    if(r==KErrNone)
        aMapAttr = aChunk->iMapAttr;
    else
        {
        if(aChunk)
            aChunk->Close(NULL), aChunk=0; // can't have been added so NULL
        }

    __KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkCreate returns %d aChunk=%08x aKernAddr=%08x",r,aChunk,aKernAddr));
    return r;
    }
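// Illustrative driver-side usage (a sketch, not part of this file; assumes a
// 4KB commit size and that EMapAttrFullyBlocking is an appropriate mapping
// attribute for the platform):
//
//     TChunkCreateInfo info;
//     info.iType = TChunkCreateInfo::ESharedKernelSingle;
//     info.iMaxSize = 0x1000;
//     info.iMapAttr = EMapAttrFullyBlocking;
//     info.iOwnsMemory = ETrue;
//     DChunk* chunk;
//     TLinAddr kernAddr;
//     TUint32 mapAttr;
//     NKern::ThreadEnterCS();
//     TInt r = Kern::ChunkCreate(info, chunk, kernAddr, mapAttr);
//     if (r == KErrNone)
//         {
//         r = Kern::ChunkCommit(chunk, 0, 0x1000); // back the region with RAM
//         if (r != KErrNone)
//             chunk->Close(NULL);
//         }
//     NKern::ThreadLeaveCS();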
|
1320 |
||
1321 |
||
1322 |
||
1323 |
/** |
|
1324 |
Commit RAM to a shared chunk which was previously created with Kern::ChunkCreate(). |
|
1325 |
The memory pages to commit are obtained from the system's free pool. |
|
1326 |
||
1327 |
This method may only be used if the chunk was created with |
|
1328 |
TChunkCreateInfo::iOwnsMemory set to true. |
|
1329 |
||
1330 |
@param aChunk Pointer to the chunk. |
|
1331 |
||
1332 |
@param aOffset The offset (in bytes) from start of chunk, which indicates the start |
|
1333 |
of the memory region to be committed. Must be a multiple of the MMU |
|
1334 |
page size. |
|
1335 |
||
1336 |
@param aSize Number of bytes to commit. Must be a multiple of the MMU page size. |
|
1337 |
||
1338 |
@return KErrNone, if successful; otherwise one of the other system wide error codes. |
|
1339 |
||
1340 |
@pre Calling thread must be in a critical section. |
|
1341 |
@pre No fast mutex can be held. |
|
1342 |
@pre Call in a thread context. |
|
1343 |
@pre Kernel must be unlocked. |
|
1344 |
@pre interrupts enabled |
|
1345 |
||
1346 |
@post Calling thread is in a critical section. |
|
1347 |
*/ |
|
1348 |
EXPORT_C TInt Kern::ChunkCommit(DChunk* aChunk, TInt aOffset, TInt aSize) |
|
1349 |
{ |
|
1350 |
CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::ChunkCommit"); |
|
1351 |
__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkCommit aChunk=%08x, aOffset=%08x, aSize=%08x", aChunk, aOffset, aSize)); |
|
1352 |
__ASSERT_DEBUG(aChunk->iChunkType == ESharedKernelSingle || aChunk->iChunkType == ESharedKernelMultiple, K::Fault(K::EChunkCommitBadType)); |
|
1353 |
TInt r = aChunk->Commit(aOffset, aSize); |
|
1354 |
__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkCommit returns %d",r)); |
|
1355 |
return r; |
|
1356 |
} |


/**
Commit RAM to a shared chunk which was previously created with Kern::ChunkCreate().
The memory pages to commit are obtained from the system's free pool and will have
physically contiguous addresses.

This method may only be used if the chunk was created with
TChunkCreateInfo::iOwnsMemory set to true.

@param aChunk   Pointer to the chunk.

@param aOffset  The offset (in bytes) from start of chunk, which indicates
                the start of the memory region to be committed. Must be a
                multiple of the MMU page size.

@param aSize    Number of bytes to commit. Must be a multiple of the MMU
                page size.

@param aPhysicalAddress On return, this is set to the physical address of the first
                page of memory which was committed, i.e. the page at aOffset.

@return KErrNone, if successful;
        KErrNoMemory, if there is insufficient free memory, or there is not a
        contiguous region of the requested size;
        otherwise one of the other system wide error codes.

@pre Calling thread must be in a critical section.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre interrupts enabled

@post Calling thread is in a critical section.
*/
EXPORT_C TInt Kern::ChunkCommitContiguous(DChunk* aChunk, TInt aOffset, TInt aSize, TUint32& aPhysicalAddress)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::ChunkCommitContiguous");
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkCommitContiguous aChunk=%08x, aOffset=%08x, aSize=%08x", aChunk, aOffset, aSize));
	__ASSERT_DEBUG(aChunk->iChunkType == ESharedKernelSingle || aChunk->iChunkType == ESharedKernelMultiple, K::Fault(K::EChunkCommitBadType));
	TInt r = aChunk->Commit(aOffset, aSize, DChunk::ECommitContiguous, &aPhysicalAddress);
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkCommitContiguous returns %d aPhysicalAddress=%08x",r,aPhysicalAddress));
	return r;
	}
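
// Usage sketch (hypothetical, for illustration only): committing a physically
// contiguous buffer suitable for simple DMA. iChunk and aBufSize are assumed.
//
//	TUint32 physAddr;
//	TInt r = Kern::ChunkCommitContiguous(iChunk, 0, aBufSize, physAddr);
//	// On success, physAddr may be programmed into a DMA controller because
//	// the committed pages are physically contiguous.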


/**
Commit memory to a shared chunk which was previously created with
Kern::ChunkCreate().

The physical region committed is that which starts at the supplied physical address.
Typically, this region either represents memory mapped I/O, or RAM which has been
set aside for special use at system boot time.

This method may only be used if the chunk was created with
TChunkCreateInfo::iOwnsMemory set to false.

@param aChunk   Pointer to the chunk.

@param aOffset  The offset (in bytes) from start of chunk, which indicates
                the start of the memory region to be committed. Must be a
                multiple of the MMU page size.

@param aSize    Number of bytes to commit. Must be a multiple of the MMU
                page size.

@param aPhysicalAddress The physical address of the memory to be committed to the
                chunk. Must be a multiple of the MMU page size.

@return KErrNone, if successful; otherwise one of the other system wide error codes.

@pre Calling thread must be in a critical section.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre interrupts enabled

@post Calling thread is in a critical section.
*/
EXPORT_C TInt Kern::ChunkCommitPhysical(DChunk* aChunk, TInt aOffset, TInt aSize, TUint32 aPhysicalAddress)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::ChunkCommitPhysical(DChunk* aChunk, TInt aOffset, TInt aSize, TUint32 aPhysicalAddress)");
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkCommitPhysical aChunk=%08x, aOffset=%08x, aSize=%08x aPhysicalAddress=%08x", aChunk, aOffset, aSize, aPhysicalAddress));
	__ASSERT_DEBUG(aChunk->iChunkType == ESharedKernelSingle || aChunk->iChunkType == ESharedKernelMultiple, K::Fault(K::EChunkCommitBadType));
	TInt r = aChunk->Commit(aOffset, aSize, DChunk::ECommitContiguousPhysical, (TUint32*)aPhysicalAddress);
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkCommitPhysical returns %d",r));
	return r;
	}
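
// Usage sketch (hypothetical, for illustration only): committing a region of
// memory-mapped hardware at a known physical address. KHwBufPhys and
// KHwBufSize are assumed constants from a board support package.
//
//	TInt r = Kern::ChunkCommitPhysical(iChunk, 0, Kern::RoundToPageSize(KHwBufSize), KHwBufPhys);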


/**
Commit memory to a shared chunk which was previously created with
Kern::ChunkCreate().

The physical region committed is determined by the list of physical addresses
supplied to this function. Typically, this region either represents memory mapped
I/O, or RAM which has been set aside for special use at system boot time.

This method may only be used if the chunk was created with
TChunkCreateInfo::iOwnsMemory set to false.

@param aChunk   Pointer to the chunk.

@param aOffset  The offset (in bytes) from start of chunk, which indicates
                the start of the memory region to be committed. Must be a
                multiple of the MMU page size.

@param aSize    Number of bytes to commit. Must be a multiple of the MMU
                page size.

@param aPhysicalAddressList A pointer to a list of physical addresses, one address for
                each page of memory committed. Each physical address must be
                a multiple of the MMU page size.

@return KErrNone, if successful; otherwise one of the other system wide error codes.

@pre Calling thread must be in a critical section.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre interrupts enabled

@post Calling thread is in a critical section.
*/
EXPORT_C TInt Kern::ChunkCommitPhysical(DChunk* aChunk, TInt aOffset, TInt aSize, const TUint32* aPhysicalAddressList)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::ChunkCommitPhysical(DChunk* aChunk, TInt aOffset, TInt aSize, const TUint32* aPhysicalAddressList)");
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkCommitPhysical aChunk=%08x, aOffset=%08x, aSize=%08x aPhysicalAddressList=%08x", aChunk, aOffset, aSize, aPhysicalAddressList));
	__ASSERT_DEBUG(aChunk->iChunkType == ESharedKernelSingle || aChunk->iChunkType == ESharedKernelMultiple, K::Fault(K::EChunkCommitBadType));
	TInt r = aChunk->Commit(aOffset, aSize, DChunk::ECommitDiscontiguousPhysical, (TUint32*)aPhysicalAddressList);
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkCommitPhysical returns %d",r));
	return r;
	}
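
// Usage sketch (hypothetical, for illustration only): committing two
// discontiguous physical pages to consecutive chunk offsets; physPage0 and
// physPage1 are assumed to be page-aligned addresses reserved elsewhere.
//
//	TUint32 pages[2] = { physPage0, physPage1 };
//	TInt pageSize = Kern::RoundToPageSize(1);
//	TInt r = Kern::ChunkCommitPhysical(iChunk, 0, 2*pageSize, pages);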


/**
Close a chunk created with Kern::ChunkCreate().

If the reference count of the chunk has gone to zero, any memory
committed to the chunk will be decommitted immediately but the chunk
object will be deleted asynchronously.

@param aChunk Pointer to the chunk.

@return True if the reference count of the chunk has gone to zero.
        False otherwise.

@pre Calling thread must be in a critical section.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre interrupts enabled
*/
EXPORT_C TBool Kern::ChunkClose(DChunk* aChunk)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkClose aChunk=%08x", aChunk));

	TBool r = (aChunk->Dec() == 1);
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkClose returns %d",r));
	if (r)
		{
		// Decommit all the memory from the chunk, which is safe as no further
		// reference can be made to the chunk now that its access count is 0.
		// Decommit is required to ensure any committed pages have their usage
		// count decremented immediately, allowing any physically committed
		// memory to be freed after this method, which may occur before aChunk
		// is deleted.
		aChunk->Decommit(0, aChunk->MaxSize());
		aChunk->AsyncDelete();
		}
	return r;
	}
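
// Usage sketch (hypothetical, for illustration only): typical teardown in a
// driver destructor.
//
//	if (iChunk)
//		{
//		Kern::ChunkClose(iChunk);	// chunk object is deleted asynchronously
//		iChunk = NULL;
//		}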


/**
Open a shared chunk in which the given address lies.

@param aThread  The thread in whose process the given address lies.
                If aThread is zero, then the current thread is used.

@param aAddress An address in the given thread's process.

@param aWrite   A flag which is true if the chunk memory is intended to be
                written to, false otherwise.

@param aOffset  On return, this is set to the offset within the chunk which
                corresponds to aAddress.

@return If the supplied address is within a shared chunk mapped into aThread's
        process, then the returned value is a pointer to this chunk.
        Otherwise zero is returned.

@pre Calling thread must be in a critical section.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre interrupts enabled

@post If a chunk pointer is returned, then the access count on this chunk has been
      incremented, i.e. Open() has been called on it.
*/
EXPORT_C DChunk* Kern::OpenSharedChunk(DThread* aThread,const TAny* aAddress, TBool aWrite, TInt& aOffset)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::OpenSharedChunk(DThread* aThread,const TAny* aAddress, TBool aWrite, TInt& aOffset)");
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::OpenSharedChunk aThread=%08x, aAddress=%08x, aWrite=%d", aThread, aAddress, aWrite));
	if(!aThread)
		aThread = &Kern::CurrentThread();
	DChunk* chunk = aThread->OpenSharedChunk(aAddress,aWrite,aOffset);
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::OpenSharedChunk returns %08x aOffset=%08x",chunk,aOffset));
	return chunk;
	}
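
// Usage sketch (hypothetical, for illustration only): resolving a pointer
// supplied by a user-side client (aUserPtr) to the shared chunk containing it.
//
//	TInt offset;
//	DChunk* chunk = Kern::OpenSharedChunk(NULL, aUserPtr, ETrue, offset);
//	if (!chunk)
//		return KErrArgument;	// address is not inside a shared chunk
//	// ... use the chunk, then balance the implicit Open() ...
//	Kern::ChunkClose(chunk);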


/**
Open a shared chunk using a given handle.

Typically, this handle would be a value supplied by a user application which
obtained the value by using RChunk::Handle().

@param aThread      The thread in which the given handle is valid.

@param aChunkHandle The handle value.

@param aWrite       A flag which is true if the chunk memory is intended to be
                    written to, false otherwise.

@return If the handle is a valid chunk handle, and it is of a shared chunk type,
        then the returned value is a pointer to this chunk.
        Otherwise zero is returned.

@pre Calling thread must be in a critical section.
@pre No fast mutex can be held.

@post If a chunk pointer is returned, then the access count on this chunk has been
      incremented, i.e. Open() has been called on it.
*/
EXPORT_C DChunk* Kern::OpenSharedChunk(DThread* aThread, TInt aChunkHandle, TBool aWrite)
	{
	CHECK_PRECONDITIONS(MASK_CRITICAL|MASK_NO_FAST_MUTEX,"Kern::OpenSharedChunk(DThread* aThread, TInt aChunkHandle, TBool aWrite)");
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::OpenSharedChunk aThread=%08x, aHandle=%08x, aWrite=%d", aThread, aChunkHandle, aWrite));
	if(!aThread)
		aThread = &Kern::CurrentThread();
	NKern::LockSystem();
	DChunk* chunk = aThread->OpenSharedChunk(aChunkHandle,aWrite);
	NKern::UnlockSystem();
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::OpenSharedChunk returns %08x",chunk));
	return chunk;
	}
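
// Usage sketch (hypothetical, for illustration only): opening a chunk from a
// handle passed in by a user-side client thread (iClient) of a driver.
//
//	DChunk* chunk = Kern::OpenSharedChunk(iClient, aClientChunkHandle, EFalse);
//	if (!chunk)
//		return KErrBadHandle;	// not a shared-chunk handle valid in iClient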


/**
@pre System lock must be held.
*/
DChunk* DThread::OpenSharedChunk(TInt aHandle, TBool /*aWrite*/)
	{
	DChunk* chunk = (DChunk*)ObjectFromHandle(aHandle,EChunk);
	if( chunk
		&& (chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
		&& chunk->Open()==KErrNone)
		return chunk;
	return 0;
	}


/**
Get the linear address for a region in a shared chunk.

The chunk must be of a shared chunk type and the specified region must
contain committed memory.

@param aChunk   The chunk.

@param aOffset  The start of the region as an offset in bytes from the
                start of the chunk.

@param aSize    The size of the region in bytes.

@param aKernelAddress On success, this value is set to the linear address in the
                kernel process which corresponds to the first byte in the
                specified region.

@return KErrNone if successful.
        KErrAccessDenied if the chunk isn't a shared chunk type.
        KErrArgument if the region isn't within the chunk.
        KErrNotFound if the whole region doesn't contain committed memory.

@pre No fast mutex can be held.
*/
EXPORT_C TInt Kern::ChunkAddress(DChunk* aChunk, TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
	{
	CHECK_PRECONDITIONS(MASK_NO_FAST_MUTEX,"Kern::ChunkAddress");
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkAddress aChunk=%08x, aOffset=%08x, aSize=%d", aChunk, aOffset, aSize));
	TInt r = aChunk->Address(aOffset,aSize,aKernelAddress);
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkAddress returns %d aKernelAddress=%08x",r,aKernelAddress));
	return r;
	}
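
// Usage sketch (hypothetical, for illustration only): obtaining a kernel-side
// address for a committed region so it can be written directly; iChunk and
// iBufSize are assumed.
//
//	TLinAddr kernAddr;
//	if (Kern::ChunkAddress(iChunk, 0, iBufSize, kernAddr) == KErrNone)
//		memclr((TAny*)kernAddr, iBufSize);	// e.g. zero-fill the buffer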


/**
Get the linear address for the base of a shared chunk within a user process.
Note, this address may become invalid if the process closes then re-opens the chunk.

@param aChunk  The chunk.

@param aThread The thread in whose process the returned address lies.

@return The base address of the shared chunk in the specified user process.

@pre No fast mutex can be held.
*/
EXPORT_C TUint8* Kern::ChunkUserBase(DChunk* aChunk, DThread* aThread)
	{
	CHECK_PRECONDITIONS(MASK_NO_FAST_MUTEX,"Kern::ChunkUserBase");
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkUserBase aChunk=%08x, aThread=%08x", aChunk, aThread));
	NKern::LockSystem();
	TUint8* r = aChunk->Base(aThread->iOwningProcess);
	NKern::UnlockSystem();
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkUserBase returns %08x", r));
	return r;
	}
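
// Usage sketch (hypothetical, for illustration only): converting a chunk
// offset (e.g. from Kern::OpenSharedChunk above) into an address valid in the
// client process's address space.
//
//	TUint8* base = Kern::ChunkUserBase(chunk, iClient);
//	TUint8* userPtr = base + offset;	// only dereferenceable from iClient's process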



/**
Get the physical address for a region in a shared chunk.

The chunk must be of a shared chunk type and the specified region must
contain committed memory.

@param aChunk   The chunk.

@param aOffset  The start of the region as an offset in bytes from the
                start of the chunk.

@param aSize    The size of the region in bytes.

@param aKernelAddress On success, this value is set to the linear address in the
                kernel process which corresponds to the first byte in the
                specified region.

@param aMapAttr On success, this is set to the MMU mapping attributes used
                for the chunk. This is a value constructed from the bit
                masks in the enumeration TMappingAttributes.
                The typical use for this value is to use it as an argument
                to the Cache::SyncMemoryBeforeDmaWrite() and
                Cache::SyncMemoryBeforeDmaRead() methods.

@param aPhysicalAddress On success, this value is set to one of two values.
                If the specified region is physically contiguous, the value
                is the physical address of the first byte in the region.
                If the region is discontiguous, the value is set to KPhysAddrInvalid.

@param aPageList If not zero, this points to an array of TUint32
                (or TPhysAddr) objects. The length of the array must be at
                least (aSize + MMU_page_size-2)/MMU_page_size + 1,
                where MMU_page_size = Kern::RoundToPageSize(1).
                On success, this array will be filled with the addresses of
                the physical pages which contain the specified region.
                These addresses are the start of each page (they are a
                multiple of the physical page size), therefore the byte
                corresponding to aOffset is at physical address
                aPageList[0]+aOffset%MMU_page_size.
                If aPageList is zero (the default), then the function will fail with
                KErrNotFound if the specified region is not physically contiguous.

@return 0 if successful and the whole region is physically contiguous.
        1 if successful but the region isn't physically contiguous.
        KErrAccessDenied if the chunk isn't a shared chunk type.
        KErrArgument if the region isn't within the chunk.
        KErrNotFound if the whole region doesn't contain committed memory,
        or aPageList==0 and the specified region is not physically contiguous.

@pre No fast mutex can be held.
*/
EXPORT_C TInt Kern::ChunkPhysicalAddress(DChunk* aChunk, TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aMapAttr, TUint32& aPhysicalAddress, TUint32* aPageList)
	{
	CHECK_PRECONDITIONS(MASK_NO_FAST_MUTEX,"Kern::ChunkPhysicalAddress");
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkPhysicalAddress aChunk=%08x, aOffset=%08x, aSize=%d", aChunk, aOffset, aSize));
	TInt r = aChunk->PhysicalAddress(aOffset,aSize,aKernelAddress,aPhysicalAddress,aPageList);
	if(r >= 0) // KErrNone or 1 (i.e. not physically contiguous) are both successful returns
		aMapAttr = aChunk->iMapAttr;
	__KTRACE_OPT(KMMU,Kern::Printf("Kern::ChunkPhysicalAddress returns %d aKernelAddress=%08x aPhysicalAddress=%08x",r,aKernelAddress,aPhysicalAddress));
	return r;
	}
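
// Usage sketch (hypothetical, for illustration only): querying physical
// addresses before a DMA transfer; iPageList is assumed to be an array sized
// per the aPageList description above.
//
//	TLinAddr kaddr;
//	TUint32 mapAttr, physAddr;
//	TInt r = Kern::ChunkPhysicalAddress(iChunk, 0, iBufSize, kaddr, mapAttr, physAddr, iPageList);
//	if (r == KErrNone)
//		{ /* region is contiguous, base physical address is physAddr */ }
//	else if (r == 1)
//		{ /* discontiguous - use the per-page addresses in iPageList */ }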

/**
Returns the list of physical addresses of the specified virtual memory region, provided
the whole region is safe to DMA to/from. A region of virtual space is considered safe
if it belongs to a chunk which is marked by the file server as being trusted to perform
DMA to/from. The file system must ensure that physical pages are not decommitted or
unlocked for demand paging until the DMA transfer is completed.
This will also lock the pages to prevent them from being moved by RAM defragmentation.

@see UserSvr::RegisterTrustedChunk
@see Kern::ReleaseMemoryFromDMA

@param aThread   The thread in whose process the given address lies. If zero, the current thread is used.
@param aAddress  An address in the given thread's process.
@param aSize     The size of the region.
@param aPageList Points to an array of TUint32 (or TPhysAddr) objects. The length of the array must
                 be at least aSize/MMU_page_size+1, where MMU_page_size = Kern::RoundToPageSize(1).
                 On success, this array will be filled with the addresses of the physical pages
                 which contain the specified region. These addresses are the start of each page
                 (they are a multiple of the physical page size), therefore the byte corresponding
                 to aAddress is at physical address aPageList[0]+(TLinAddr)aAddress%MMU_page_size.

@return KErrNone if successful.
        KErrAccessDenied if any part of the region doesn't belong to a "trusted" chunk.
        Another error code if the memory region is invalid, or is mapped in 1MB sections or large pages.

@pre Calling thread must be in critical section.
@pre No fast mutex held.

@internalComponent
*/
EXPORT_C TInt Kern::PrepareMemoryForDMA(DThread* aThread, TAny* aAddress, TInt aSize, TPhysAddr* aPageList)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::PrepareMemoryForDMA");
	__KTRACE_OPT(KMMU2,Kern::Printf("Kern::PrepareMemoryForDMA T:%x, A:%x, S:%x, Prt:%x",aThread, aAddress, aSize, aPageList));
	if(!aThread)
		aThread = &Kern::CurrentThread();
	return aThread->PrepareMemoryForDMA(aAddress, aSize, aPageList);
	}

/**
Unlocks the physical pages that have been locked by PrepareMemoryForDMA.
All input parameters are the same as those in PrepareMemoryForDMA.

@see Kern::PrepareMemoryForDMA

@param aThread   The thread in whose process the given address lies. If zero, the current thread is used.
@param aAddress  An address in the given thread's process.
@param aSize     The size of the region.
@param aPageList Points to the list of pages returned by PrepareMemoryForDMA.

@return KErrNone if successful.
        KErrArgument if the list of physical pages is invalid.

@pre Calling thread must be in critical section.
@pre No fast mutex held.

@internalComponent
*/
EXPORT_C TInt Kern::ReleaseMemoryFromDMA(DThread* aThread, TAny* aAddress, TInt aSize, TPhysAddr* aPageList)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Kern::ReleaseMemoryFromDMA");
	__KTRACE_OPT(KMMU2,Kern::Printf("Kern::ReleaseMemoryFromDMA T:%x, A:%x, S:%x, Prt:%x",aThread, aAddress, aSize, aPageList));
	if(!aThread)
		aThread = &Kern::CurrentThread();
	return aThread->ReleaseMemoryFromDMA(aAddress, aSize, aPageList);
	}
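
// Usage sketch (hypothetical, for illustration only): bracketing a DMA
// transfer over a client buffer in a trusted chunk; KMaxPages, aBuf and aLen
// are assumed.
//
//	TPhysAddr pages[KMaxPages];
//	TInt r = Kern::PrepareMemoryForDMA(iClient, aBuf, aLen, pages);
//	if (r == KErrNone)
//		{
//		// ... program the DMA controller with pages[] and await completion ...
//		Kern::ReleaseMemoryFromDMA(iClient, aBuf, aLen, pages);
//		}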

/**
Installs the trace handler hook.

@param aHandler The trace handler hook. It will be called on any debug log; this
                includes user-side (@see RDebug::Print), kernel (@see Kern::Printf)
                and PlatSec logging.
                Set to NULL to uninstall the hook.
@see TTraceHandler
@return The previous hook, or NULL.
*/
EXPORT_C TTraceHandler Kern::SetTraceHandler(TTraceHandler aHandler)
	{
	return (TTraceHandler) SetHook(EHookTrace, (TKernelHookFn)aHandler, ETrue);
	}
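
// Usage sketch (hypothetical, for illustration only): installing a trace hook.
// This assumes the TTraceHandler signature TBool (*)(const TDesC8&, TTraceSource)
// as declared for this API; returning EFalse asks the kernel to also perform
// its normal trace output.
//
//	TBool MyTraceHook(const TDesC8& aText, TTraceSource aSource)
//		{
//		// forward aText to some debug transport here
//		return EFalse;
//		}
//	...
//	TTraceHandler oldHook = Kern::SetTraceHandler(MyTraceHook);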

/********************************************
* Kernel event dispatcher
********************************************/

/** Returns whether the kernel has been built with __DEBUGGER_SUPPORT__ defined. */

EXPORT_C TBool DKernelEventHandler::DebugSupportEnabled()
	{
#ifdef __DEBUGGER_SUPPORT__
	return ETrue;
#else
	return EFalse;
#endif
	}


/** Constructs an event handler.
The handler is not queued.
@param aCb Pointer to C callback function called when an event occurs.
@param aPrivateData Data to be passed to the callback function.

@pre Calling thread must be in a critical section.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre interrupts enabled
*/

EXPORT_C DKernelEventHandler::DKernelEventHandler(TCallback aCb, TAny* aPrivateData)
	: iAccessCount(1),
	  iCb(aCb),
	  iPrivateData(aPrivateData)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DKernelEventHandler::DKernelEventHandler");
	__ASSERT_DEBUG(aCb != NULL, K::Fault(K::EDebugEventHandlerBadCallBack));
	}

/** Adds the handler to the handler queue.
@param aPolicy Selects where the handler should be inserted.
@return standard error code

@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre interrupts enabled
*/

EXPORT_C TInt DKernelEventHandler::Add(TAddPolicy /*aPolicy*/)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"DKernelEventHandler::Add");
	NKern::LockSystem();
	if (!iLink.iNext)
		{
		HandlersQ.Add(&iLink);
		}
	NKern::UnlockSystem();
	return KErrNone;
	}
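
// Usage sketch (hypothetical, for illustration only): installing a handler
// that logs thread creation. The event code EEventAddThread and the meaning
// of a1 are assumptions to be checked against the TKernelEvent documentation.
//
//	TUint MyEventCb(TKernelEvent aEvent, TAny* a1, TAny* /*a2*/, TAny* /*aData*/)
//		{
//		if (aEvent == EEventAddThread)
//			Kern::Printf("thread %O created", (DThread*)a1);
//		return DKernelEventHandler::ERunNext;	// let any later handlers run too
//		}
//	...
//	DKernelEventHandler* handler = new DKernelEventHandler(MyEventCb, NULL);
//	if (handler)
//		handler->Add();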

/** Decrements the access count.
Removes the handler from the queue and asynchronously destroys it if the
access count reaches zero.
@return the original access count.

@pre Calling thread must be in a critical section.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre interrupts enabled
*/

EXPORT_C TInt DKernelEventHandler::Close()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DKernelEventHandler::Close");
	NKern::LockSystem();
	TInt r=iAccessCount;
	if (r>0 && --iAccessCount==0)
		{
		if (iLink.iNext)
			{
			iLink.Deque();
			iLink.iNext=NULL;
			}
		}
	NKern::UnlockSystem();
	if (r==1)
		AsyncDelete();
	return r;
	}

/** Sends the event to all handlers in the queue.
@internalTechnology
*/

TUint DKernelEventHandler::Dispatch(TKernelEvent aType, TAny* a1, TAny* a2)
	{
	TUint action = ERunNext;
	NKern::ThreadEnterCS();
	NKern::LockSystem();
	SDblQueLink* pE=&HandlersQ.iA;
	SDblQueLink* pL=pE->iNext;
	DKernelEventHandler* pD=NULL;
	while (pL!=pE)
		{
		DKernelEventHandler* pH=_LOFF(pL,DKernelEventHandler,iLink);
		++pH->iAccessCount;		// pin the handler while its callback runs
		NKern::UnlockSystem();
		if (pD)
			{
			pD->AsyncDelete();
			pD=NULL;
			}
		action=pH->iCb(aType, a1, a2, pH->iPrivateData);
		NKern::LockSystem();
		SDblQueLink* pN=pL->iNext;
		if (--pH->iAccessCount==0)
			{
			pL->Deque();
			pL->iNext=NULL;
			pD=pH;		// defer deletion until the system lock is released
			}
		if (!(action & ERunNext))
			break;
		pL=pN;
		}
	NKern::UnlockSystem();
	if (pD)
		pD->AsyncDelete();
	NKern::ThreadLeaveCS();
	return action;
	}


/******************************************************************************
 * Memory saving fast deterministic thread wait queue
 ******************************************************************************/
TThreadWaitList::TList* TThreadWaitList::FirstFree;
TInt TThreadWaitList::NLists;
TInt TThreadWaitList::NWaitObj;
TInt TThreadWaitList::NThrd;

TThreadWaitList::~TThreadWaitList()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"~TThreadWaitList");
	__ASSERT_ALWAYS((iWaitPtr==EEmpty || iWaitPtr==EInitValue), K::Fault(K::EThreadWaitListDestroy));
	if (iWaitPtr==EEmpty)
		Down(EFalse);
	}

TInt TThreadWaitList::Construct()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"TThreadWaitList::Construct");
	TInt r = Up(EFalse);
	if (r==KErrNone)
		iWaitPtr = EEmpty;
	return r;
	}

TInt TThreadWaitList::ThreadCreated()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"TThreadWaitList::ThreadCreated");
	return Up(ETrue);
	}

void TThreadWaitList::ThreadDestroyed()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"TThreadWaitList::ThreadDestroyed");
	Down(ETrue);
	}

// Account for a new thread or wait object, and grow the shared pool of TList
// structures if the worst-case requirement (min(threads/2, wait objects))
// now exceeds the number of lists available.
TInt TThreadWaitList::Up(TBool aThread)
	{
	TList* l = 0;
	TInt r = 1;
	do	{
		NKern::LockSystem();
		__KTRACE_OPT(KSEMAPHORE,Kern::Printf(">TThreadWaitList::Up W=%d T=%d L=%d l=%08x", NWaitObj, NThrd, NLists, l));
		TInt nw=NWaitObj, nt=NThrd;
		aThread ? ++nt : ++nw;
		TInt needed = Min(nt/2, nw);
		if (needed<=NLists)
			goto done;
		else if (l)
			{
			++NLists;
			l->Next() = FirstFree;
			FirstFree = l;
			l = 0;
done:
			NThrd=nt, NWaitObj=nw;
			r = KErrNone;
			}
		__KTRACE_OPT(KSEMAPHORE,Kern::Printf("<TThreadWaitList::Up W=%d T=%d L=%d l=%08x", NWaitObj, NThrd, NLists, l));
		NKern::UnlockSystem();
		if (r!=KErrNone)
			{
			// Another TList is needed but must be allocated outside the system lock
			__ASSERT_ALWAYS(!l, K::Fault(K::EThreadWaitListUp));
			l = new TList;
			if (!l)
				r = KErrNoMemory;
			}
		} while (r>0);
	__KTRACE_OPT(KSEMAPHORE,Kern::Printf("<TThreadWaitList::Up l=%08x r=%d", l, r));
	delete l;	// free the spare if a retry found the pool already big enough
	return r;
	}

// Account for a destroyed thread or wait object, shrinking the pool if a
// list is no longer needed.
void TThreadWaitList::Down(TBool aThread)
	{
	TList* l = 0;
	NKern::LockSystem();
	__KTRACE_OPT(KSEMAPHORE,Kern::Printf(">TThreadWaitList::Down W=%d T=%d L=%d", NWaitObj, NThrd, NLists));
	aThread ? --NThrd : --NWaitObj;
	TInt needed = Min(NThrd/2, NWaitObj);
	if (needed < NLists)
		{
		--NLists;
		l = FirstFree;
		FirstFree = l->Next();
		}
	__KTRACE_OPT(KSEMAPHORE,Kern::Printf("<TThreadWaitList::Down W=%d T=%d L=%d l=%08x", NWaitObj, NThrd, NLists, l));
	NKern::UnlockSystem();
	delete l;
	}

#ifndef __PRI_LIST_MACHINE_CODED__
DThread* TThreadWaitList::First() const
	{
	__DEBUG_ONLY(Check());
	if (iWaitPtr == EEmpty)
		return 0;
	if (!(iWaitPtr & EFlagList))
		return (DThread*)iWaitPtr;
	TList* l = (TList*)(iWaitPtr&~EFlagList);
	return _LOFF(l->First(),DThread,iWaitLink);
	}

TInt TThreadWaitList::HighestPriority() const
	{
	__DEBUG_ONLY(Check());
	if (iWaitPtr==EEmpty)
		return -1;
	if (!(iWaitPtr & EFlagList))
		return ((DThread*)iWaitPtr)->iWaitLink.iPriority;
	TList* l = (TList*)(iWaitPtr&~EFlagList);
	return l->HighestPriority();
	}

void TThreadWaitList::Add(DThread* aThread)
	{
	__DEBUG_ONLY(Check());
	if (iWaitPtr==EEmpty)
		{
		iWaitPtr = (TLinAddr)aThread;
		return;
		}
	TList* l = 0;
	if (iWaitPtr&EFlagList)
		l = (TList*)(iWaitPtr&~EFlagList);
	else
		{
		// Second waiter: switch from the single-thread representation to a
		// full priority list taken from the free pool.
		DThread* t0 = (DThread*)iWaitPtr;
		l = FirstFree;
		FirstFree = l->Next();
		l->Next() = 0;
		iWaitPtr = TLinAddr(l)|EFlagList;
		l->Add(&t0->iWaitLink);
		}
	l->Add(&aThread->iWaitLink);
	}

void TThreadWaitList::Remove(DThread* aThread)
	{
	__DEBUG_ONLY(Check());
	__ASSERT_DEBUG(iWaitPtr!=EEmpty && ((iWaitPtr&EFlagList)||(iWaitPtr==(TLinAddr)aThread)), K::Fault(K::EThreadWaitListRemove));
	if (!(iWaitPtr&EFlagList))
		{
		iWaitPtr = EEmpty;
		return;
		}
	TList* l = (TList*)(iWaitPtr&~EFlagList);
	l->Remove(&aThread->iWaitLink);
	TUint p0 = l->iPresent[0];
	TUint p1 = l->iPresent[1];
	// If more than one priority is still present (more than one bit set across
	// the two present masks), the list representation is still required.
	if ((p0&&p1) || (p0&(p0-1)) || (p1&(p1-1)))
		return;
	TPriListLink* wl = l->First();
	__ASSERT_DEBUG(wl, K::Fault(K::EThreadWaitListRemove2));
	if (wl->iNext != wl)
		return;		// more than one thread at this priority - keep the list
	// Only one waiter left - revert to the single-thread representation and
	// return the list to the free pool.
	DThread* t = _LOFF(wl,DThread,iWaitLink);
	l->Remove(&t->iWaitLink);
	iWaitPtr = (TLinAddr)t;
	l->Next() = FirstFree;
	FirstFree = l;
	}

void TThreadWaitList::ChangePriority(DThread* aThread, TInt aNewPriority)
	{
	__DEBUG_ONLY(Check());
	__ASSERT_DEBUG(iWaitPtr!=EEmpty && ((iWaitPtr&EFlagList)||(iWaitPtr==(TLinAddr)aThread)), K::Fault(K::EThreadWaitListChangePriority));
	if (!(iWaitPtr & EFlagList))
		aThread->iWaitLink.iPriority = (TUint8)aNewPriority;
	else
		{
		TList* l = (TList*)(iWaitPtr&~EFlagList);
		l->ChangePriority(&aThread->iWaitLink, aNewPriority);
		}
	}
#endif

#ifdef _DEBUG
// Check the system lock is held and second phase construction has completed
// successfully
void TThreadWaitList::Check() const
	{
	__ASSERT_SYSTEM_LOCK;
	TUint32 mask = RHeap::ECellAlignment-1;
	TUint32 lowbits = iWaitPtr & mask;
	__ASSERT_ALWAYS(lowbits<=1, K::Fault(K::EThreadWaitListCheck));
	}
#endif