// Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32/kernel/sshbuf.cpp
// Shareable Data Buffers

#include <kernel/sshbuf.h>
#include <kernel/cache.h>
#include "execs.h"
#include <kernel/smap.h>

/********************************************
* Kernel-side executive calls
********************************************/

TInt ExecHandler::ShPoolCreate(const TShPoolInfo& aInfo, TUint aFlags)
    {
    __KTRACE_OPT(KEXEC, Kern::Printf(">Exec::ShPoolCreate"));

    const TUint acceptableFlags = EShPoolWriteable | EShPoolAllocate;
    TUint validatedFlags = aFlags & acceptableFlags;
    aFlags &= ~acceptableFlags;

    if (aFlags != 0)
        {
        Kern::Printf("Exec::ShPoolCreate: flags 0x%x after 0x%x", aFlags, validatedFlags);
        // SBZ bit set in flags passed to the exec call: panic the caller.
        K::PanicKernExec(EShBufExecBadParameter);
        }

    TShPoolCreateInfo uinfo;

    kumemget32(&uinfo.iInfo, &aInfo, sizeof(uinfo.iInfo));

    NKern::ThreadEnterCS();
    DShPool* pC = NULL;
    TInt r = K::ShPoolCreate(pC, uinfo); // calls NewShPool on the current process

    if (r == KErrNone)
        {
        // The flags are passed down as attributes to RequestUserHandle, Add, and AddToProcess
        r = K::MakeHandle(EOwnerProcess, pC, validatedFlags); // this will add the pool to the process

        if (r < KErrNone && pC)
            pC->Close(NULL); // can't have been added so NULL
        }

    NKern::ThreadLeaveCS();

    __KTRACE_OPT(KEXEC, Kern::Printf("<Exec::ShPoolCreate returns %d", r));
    return r;
    }

TInt K::ShPoolCreate(DShPool*& aPool, TShPoolCreateInfo& aInfo)
    {
    __KTRACE_OPT(KEXEC, Kern::Printf(">K::ShPoolCreate"));

    DThread* pT = TheCurrentThread;
    DProcess* pP = pT->iOwningProcess;

    TInt r = pP->NewShPool(aPool, aInfo);

    if (r != KErrNone)
        aPool = NULL;

    __KTRACE_OPT(KEXEC, Kern::Printf("<K::ShPoolCreate returns %d %08x", r, aPool));
    return r;
    }

TInt DShPool::ModifyClientFlags(DProcess* aProcess, TUint aSetMask, TUint aClearMask)
    {
    LockPool();
    DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));

    if ((client != NULL) && client->iAccessCount)
        {
        // access count must be non-zero, otherwise the pool is in the process of being closed
        client->iFlags &= ~aClearMask;
        client->iFlags |= aSetMask;
        }
    UnlockPool();

    return KErrNone;
    }

TInt ExecHandler::ShPoolAlloc(TInt aHandle, TUint aFlags, SShBufBaseAndSize& aBaseAndSize)
    {
    __KTRACE_OPT(KEXEC, Kern::Printf(">Exec::ShPoolAlloc (0x%08x, 0x%x)", aHandle, aFlags));
    TUint attr = 0;
    DShBuf* pC = NULL;

    NKern::LockSystem();
    DShPool* pool = reinterpret_cast<DShPool*>(K::ObjectFromHandle(aHandle, EShPool, attr));
    /* K::ObjectFromHandle will panic on NULL */

    pool->CheckedOpen();

    NKern::ThreadEnterCS();
    NKern::UnlockSystem();

    TInt r = KErrAccessDenied; // for the case that (attr & EShPoolAllocate) == 0
    SShBufBaseAndSize bs;

    if ((attr & EShPoolAllocate))
        {
        r = pool->Alloc(pC);

        if (r == KErrNone)
            {
            attr |= RObjectIx::EReserved;

            if (aFlags & EShPoolAllocNoMap)
                {
                attr |= EShPoolNoMapBuf;
                }

            r = K::MakeHandle(EOwnerProcess, pC, attr); // this will add the buffer to the process

            if (r < KErrNone && pC != NULL)
                pC->Close(NULL); // can't have been added so NULL
            else
                {
                bs.iBase = reinterpret_cast<TUint>(pC->Base(TheCurrentThread->iOwningProcess));
                bs.iSize = pool->BufSize();
                }
            }
        }

    pool->Close(NULL);
    NKern::ThreadLeaveCS();

    if (r > KErrNone)
        {
        kumemput32(&aBaseAndSize, &bs, sizeof(bs));
        }

    __KTRACE_OPT(KEXEC, Kern::Printf("<Exec::ShPoolAlloc returns %d 0x%08x", r, pC));

    return r;
    }

void ExecHandler::ShPoolGetInfo(DShPool* aShPool, TShPoolInfo& aInfo)
    {
    __KTRACE_OPT(KEXEC, Kern::Printf(">Exec::ShPoolGetInfo"));
    TShPoolInfo info;
    aShPool->GetInfo(info);
    NKern::UnlockSystem();
    kumemput32(&aInfo, &info, sizeof(info));
    }

TUint ExecHandler::ShPoolFreeCount(DShPool* aShPool)
    {
    __KTRACE_OPT(KEXEC, Kern::Printf(">Exec::ShPoolFreeCount (0x%08x)", aShPool));
    return aShPool->FreeCount();
    }

TInt ExecHandler::ShPoolNotification(DShPool* aShPool, TShPoolNotifyType aType, TUint aThreshold, TRequestStatus& aStatus)
    {
    __KTRACE_OPT(KEXEC, Kern::Printf(">Exec::ShPoolNotification (0x%08x, %d, 0x%x)", aShPool, aType, &aStatus));
    aShPool->CheckedOpen();
    NKern::ThreadEnterCS();
    NKern::UnlockSystem();
    TInt r = aShPool->AddNotification(aType, aThreshold, aStatus);
    aShPool->Close(NULL);
    NKern::ThreadLeaveCS();

    if (r == KErrArgument)
        K::PanicKernExec(EShBufExecBadNotification);

    return r;
    }

TInt ExecHandler::ShPoolNotificationCancel(DShPool* aShPool, TShPoolNotifyType aType, TRequestStatus& aStatus)
    {
    __KTRACE_OPT(KEXEC, Kern::Printf(">Exec::ShPoolNotificationCancel (0x%08x)", aShPool));

    aShPool->CheckedOpen();
    NKern::ThreadEnterCS();
    NKern::UnlockSystem();
    TInt r = aShPool->RemoveNotification(aType, aStatus);
    aShPool->Close(NULL);
    NKern::ThreadLeaveCS();

    if (r == KErrArgument)
        K::PanicKernExec(EShBufExecBadNotification);

    return r;
    }

TInt ExecHandler::ShPoolBufferWindow(DShPool* aShPool, TInt aWindowSize, TBool aAutoMap)
    {
    __KTRACE_OPT(KEXEC, Kern::Printf(">Exec::ShPoolBufferWindow (0x%08x)", aShPool));

    DProcess* pP = TheCurrentThread->iOwningProcess;
    aShPool->CheckedOpen();
    NKern::ThreadEnterCS();
    NKern::UnlockSystem();
    TInt r = aShPool->SetBufferWindow(pP, aWindowSize);

    if (r == KErrNone)
        {
        r = aShPool->ModifyClientFlags(pP, aAutoMap ? EShPoolAutoMapBuf : 0, aAutoMap ? 0 : EShPoolAutoMapBuf);
        }
    aShPool->Close(NULL);
    NKern::ThreadLeaveCS();
    return r;
    }

TInt ExecHandler::ShBufMap(DShBuf* aBuf, TBool aReadOnly, SShBufBaseAndSize& aBaseAndSize)
    {
    __KTRACE_OPT(KEXEC, Kern::Printf(">Exec::ShBufMap (0x%08x)", aBuf));

    DProcess* pP = TheCurrentThread->iOwningProcess;
    DShPool* pool = aBuf->iPool;
    SShBufBaseAndSize bs;

    TInt r = KErrAccessDenied;

    aBuf->CheckedOpen();
    NKern::ThreadEnterCS();
    NKern::UnlockSystem();

    pool->LockPool();
    DShPoolClient* client = reinterpret_cast<DShPoolClient*>(pool->iClientMap->Find(reinterpret_cast<TUint>(pP)));
    TUint attr = client->iFlags;
    pool->UnlockPool();

    // if the buffer is being mapped as writeable, check that the pool has the
    // corresponding capabilities

    if (aReadOnly)
        attr &= ~EShPoolWriteable;

    r = aBuf->Map(attr, pP, bs.iBase);
    bs.iSize = aBuf->Size();

    aBuf->Close(NULL);
    NKern::ThreadLeaveCS();

    if (r == KErrNone)
        {
        kumemput32(&aBaseAndSize, &bs, sizeof(bs));
        }

    __KTRACE_OPT(KEXEC, Kern::Printf("<Exec::ShBufMap returns %d 0x%08x", r, bs.iBase));
    return r;
    }

TInt ExecHandler::ShBufUnMap(DShBuf* aShBuf)
    {
    __KTRACE_OPT(KEXEC, Kern::Printf(">Exec::ShBufUnMap (0x%08x)", aShBuf));

    aShBuf->CheckedOpen();
    NKern::ThreadEnterCS();
    DProcess* pP = TheCurrentThread->iOwningProcess;
    NKern::UnlockSystem();
    TInt r = aShBuf->UnMap(pP);
    aShBuf->Close(NULL);
    NKern::ThreadLeaveCS();
    return r;
    }

TInt ExecHandler::ShBufBaseAndSize(DShBuf* aShBuf, SShBufBaseAndSize& aBaseAndSize)
    {
    __KTRACE_OPT(KEXEC, Kern::Printf(">Exec::ShBufBaseAndSize (0x%08x)", aShBuf));

    aShBuf->CheckedOpen();
    NKern::ThreadEnterCS();
    NKern::UnlockSystem();
    SShBufBaseAndSize bs;

    bs.iBase = reinterpret_cast<TUint>(aShBuf->Base(TheCurrentThread->iOwningProcess));
    bs.iSize = aShBuf->Size();
    aShBuf->Close(NULL);
    NKern::ThreadLeaveCS();

    kumemput32(&aBaseAndSize, &bs, sizeof(bs));

    __KTRACE_OPT(KEXEC, Kern::Printf("<Exec::ShBufBaseAndSize (0x%08x, 0x%08x, 0x%08x)", aShBuf, bs.iBase, bs.iSize));

    return KErrNone;
    }

/********************************************
* DShPool and DShBuf
********************************************/

EXPORT_C TShPoolInfo::TShPoolInfo()
    {
    memclr(this, sizeof(TShPoolInfo));
    }

/**
Sets the pool to be created with page-aligned buffers

@param aFlag        Indicates that the pool's buffers are page-aligned
@param aBufSize     Size of a single buffer within the pool
@param aInitialBufs Initial number of buffers allocated to the pool
*/
EXPORT_C TShPoolCreateInfo::TShPoolCreateInfo(TShPoolPageAlignedBuffers aFlag, TUint aBufSize, TUint aInitialBufs)
    {
    iInfo.iBufSize = aBufSize;
    iInfo.iInitialBufs = aInitialBufs;
    iInfo.iFlags = aFlag;
    SetSizingAttributes(aInitialBufs, 0, 0, 0);
    iPhysAddr.iPhysAddrList = 0;
    iPages = 0;
    }

/**
Sets the pool to be created with non-page-aligned buffers

@param aFlag        Indicates that the pool's buffers are not page-aligned
@param aBufSize     Size of a single buffer within the pool
@param aInitialBufs Initial number of buffers allocated to the pool
@param aAlignment   Alignment of the start of each buffer in the pool
*/
EXPORT_C TShPoolCreateInfo::TShPoolCreateInfo(TShPoolNonPageAlignedBuffers aFlag, TUint aBufSize, TUint aInitialBufs, TUint aAlignment)
    {
    iInfo.iBufSize = aBufSize;
    iInfo.iInitialBufs = aInitialBufs;
    iInfo.iAlignment = aAlignment;
    iInfo.iFlags = aFlag;
    SetSizingAttributes(aInitialBufs, 0, 0, 0);
    iPhysAddr.iPhysAddrList = 0;
    iPages = 0;
    }

/**
Sets the pool to be created in a specific physical address range

@param aFlag                Indicates pool to be created within a specific physical address range
@param aBufSize             Size of a single buffer within the pool
@param aInitialBufs         Initial number of buffers allocated to the pool
@param aAlignment           Alignment of the start of each buffer in the pool
@param aPages               Number of pages to commit. Must be a multiple of the MMU
                            page size.
@param aPhysicalAddressList A pointer to a list of physical addresses, one address for
                            each page of memory committed. Each physical address must be
                            a multiple of the MMU page size.
@see DShPool::Create()
*/
EXPORT_C TShPoolCreateInfo::TShPoolCreateInfo(TShPoolMemoryDevice aFlag, TUint aBufSize, TUint aInitialBufs, TUint aAlignment, TUint aPages, TPhysAddr* aPhysicalAddressList)
    {
    iInfo.iBufSize = aBufSize;
    iInfo.iInitialBufs = aInitialBufs;
    iInfo.iAlignment = aAlignment;
    iPhysAddr.iPhysAddrList = aPhysicalAddressList;
    iPages = aPages;
    iInfo.iFlags = aFlag;
    SetSizingAttributes(aInitialBufs, 0, 0, 0);
    }

/**
Sets the pool to be created in a specific physical address range

@param aFlag            Indicates pool to be created within a specific physical address range
@param aBufSize         Size of a single buffer within the pool
@param aInitialBufs     Initial number of buffers allocated to the pool
@param aAlignment       Alignment of the start of each buffer in the pool
@param aPages           Number of pages to commit. Must be a multiple of the MMU
                        page size.
@param aPhysicalAddress A physical address. It must be a multiple of the MMU
                        page size.
@see DShPool::Create()
*/
EXPORT_C TInt TShPoolCreateInfo::TShPoolCreateInfo(TShPoolMemoryDevice aFlag, TUint aBufSize, TUint aInitialBufs, TUint aAlignment, TUint aPages, TPhysAddr aPhysicalAddress)
    {
    iInfo.iBufSize = aBufSize;
    iInfo.iInitialBufs = aInitialBufs;
    iInfo.iAlignment = aAlignment;
    iPhysAddr.iPhysAddr = aPhysicalAddress;
    iPages = aPages;
    iInfo.iFlags = aFlag;
    SetContiguous();
    SetSizingAttributes(aInitialBufs, 0, 0, 0);
    }

EXPORT_C TInt TShPoolCreateInfo::SetSizingAttributes(TUint aMaxBufs, TUint aGrowTriggerRatio,
                                                     TUint aGrowByRatio, TUint aShrinkHysteresisRatio)
    {
    if (aGrowTriggerRatio == 0 || aGrowByRatio == 0) // No automatic growing/shrinking
        {
        aGrowTriggerRatio = aGrowByRatio = 0;
        if (aMaxBufs != iInfo.iInitialBufs)
            return KErrArgument;
        }
    else
        {
        // aGrowTriggerRatio must be < 1.0 (i.e. < 256 as fx24.8)
        // aShrinkHysteresisRatio must be > 1.0 (i.e. > 256 as fx24.8)
        if (aGrowTriggerRatio >= 256 || aShrinkHysteresisRatio <= 256)
            return KErrArgument;

        if ((iInfo.iFlags & EShPoolContiguous) && (iInfo.iFlags & EShPoolNonPageAlignedBuffer))
            return KErrNotSupported;
        }

    iInfo.iMaxBufs = aMaxBufs;
    iInfo.iGrowTriggerRatio = aGrowTriggerRatio;
    iInfo.iGrowByRatio = aGrowByRatio;
    iInfo.iShrinkHysteresisRatio = aShrinkHysteresisRatio;

    return KErrNone;
    }

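// Illustrative note (not part of the original source): the ratios are fx24.8
// fixed-point values, so 256 represents 1.0. A hypothetical configuration of
// a pool that can grow from its initial size up to 64 buffers:
//
//     // grow when free buffers fall below ~10% (26/256 ~ 0.10) of the pool,
//     // grow by 25% (64/256), and only shrink again once free space exceeds
//     // ~1.30x (332/256) the level that triggered the growth
//     TInt r = info.SetSizingAttributes(64, 26, 64, 332);
//     // r == KErrArgument if a ratio is out of range (see the checks above)
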
EXPORT_C TInt TShPoolCreateInfo::SetExclusive()
    {
    iInfo.iFlags |= EShPoolExclusiveAccess;

    return KErrNone;
    }

EXPORT_C TInt TShPoolCreateInfo::SetGuardPages()
    {
    iInfo.iFlags |= EShPoolGuardPages;

    return KErrNone;
    }

EXPORT_C TInt TShPoolCreateInfo::SetContiguous()
    {
    iInfo.iFlags |= EShPoolContiguous;

    return KErrNone;
    }

/**
Creates a pool of buffers with the specified attributes

@param aPool[out] Returns a pointer to the pool.
@param aInfo      Reference to pool create information.
@param aMap       ETrue specifies that an allocated or received buffer
                  will be automatically mapped into the process's address space.
@param aFlags     Flags to modify the behaviour of the handle. This should be a bit-wise
                  OR of values from TShPoolHandleFlags.
@pre Calling thread must be in a critical section.

@return KErrNone if successful, otherwise one of the system-wide error codes.
*/
EXPORT_C TInt Kern::ShPoolCreate(TShPool*& aPool, TShPoolCreateInfo& aInfo, TBool aMap, TUint aFlags)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShPoolCreate, (aInfo 0x%08x)", &aInfo));
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Kern::ShPoolCreate");

    aPool = NULL;

    const TUint acceptableFlags = EShPoolWriteable | EShPoolAllocate;

    TUint validatedFlags = aFlags & acceptableFlags;

    aFlags &= ~acceptableFlags;

    if (aFlags != 0)
        {
        Kern::Printf("Kern::ShPoolCreate: flags 0x%x after 0x%x", aFlags, validatedFlags);
        return KErrArgument;
        }

    DShPool* pool;
    TInt r = K::TheKernelProcess->NewShPool(pool, aInfo);
    if (r == KErrNone)
        {
        if (aMap) validatedFlags |= EShPoolAutoMapBuf;

        r = pool->AddToProcess(K::TheKernelProcess, validatedFlags);
        if (r == KErrNone)
            {
            aPool = reinterpret_cast<TShPool*>(pool);
            }
        else
            {
            pool->Close(NULL); // can't have been added so NULL
            }
        }

    __KTRACE_OPT(KMMU, Kern::Printf("<Kern::ShPoolCreate, pool(%x)", aPool));
    return r;
    }

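// Illustrative sketch (not part of the original source): creating a pool from
// kernel-side code. The TShPoolCreateInfo contents are assumptions.
//
//     // 'info' prepared with one of the TShPoolCreateInfo constructors above
//     TShPool* pool = NULL;
//     NKern::ThreadEnterCS();
//     TInt r = Kern::ShPoolCreate(pool, info, ETrue,
//                                 EShPoolWriteable | EShPoolAllocate);
//     NKern::ThreadLeaveCS();
//     // On success the pool has been added to the kernel process and, since
//     // aMap was ETrue, buffers will be auto-mapped when allocated/received.
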
/**
Opens a pool of buffers in the kernel address space using a user process
handle

@param aPool[out] Returns pointer to the pool.
@param aThread    Pointer to the user process's thread; if null the
                  current thread is used.
@param aHandle    User process handle to open the pool from
@param aMap       ETrue specifies that an allocated or received buffer
                  will be automatically mapped into the process's address space.
@param aFlags     Flags to modify the behaviour of the handle. This should be a bit-wise
                  OR of values from TShPoolHandleFlags.
@pre Calling thread must be in a critical section.

@return KErrNone if successful, otherwise one of the system-wide error codes.
*/
EXPORT_C TInt Kern::ShPoolOpen(TShPool*& aPool, DThread* aThread, TInt aHandle, TBool aMap, TUint aFlags)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShPoolOpen, 0x%08x %d", aThread, aHandle));
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Kern::ShPoolOpen");

    const TUint acceptableFlags = EShPoolWriteable | EShPoolAllocate;

    TUint validatedFlags = aFlags & acceptableFlags;

    aFlags &= ~acceptableFlags;

    if (aFlags != 0)
        {
        Kern::Printf("Kern::ShPoolOpen: flags 0x%x after 0x%x", aFlags, validatedFlags);
        return KErrArgument;
        }

    TInt r = KErrNotFound;
    aPool = NULL;
    TUint attr;

    NKern::LockSystem();
    DShPool* pool = reinterpret_cast<DShPool*>(aThread->ObjectFromHandle(aHandle, EShPool, attr));
    if (pool != NULL)
        {
        r = pool->Open();
        }

    NKern::UnlockSystem();

    attr |= validatedFlags;

    if (r == KErrNone)
        {
        if (aMap) attr |= EShPoolAutoMapBuf;

        r = pool->AddToProcess(K::TheKernelProcess, attr);
        if (r == KErrNone)
            {
            aPool = reinterpret_cast<TShPool*>(pool);
            }
        else
            {
            pool->Close(NULL); // can't have been added so NULL
            }
        }

    return r;
    }

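// Illustrative sketch (not part of the original source): a driver channel
// importing a pool from its user-side client. 'iClientThread' and the handle
// delivery mechanism are assumptions.
//
//     TShPool* pool = NULL;
//     NKern::ThreadEnterCS();
//     TInt r = Kern::ShPoolOpen(pool, iClientThread, aPoolHandle, EFalse, 0);
//     NKern::ThreadLeaveCS();
//     if (r == KErrNone)
//         {
//         // ... allocate from / free to the pool ...
//         Kern::ShPoolClose(pool); // when finished, balance the open
//         }
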
/**
Closes a pool of buffers in the kernel address space

@param aPool Pointer to the pool.

@return ETrue if the reference count of the pool has gone to zero,
        otherwise one of the system-wide error codes.
*/
EXPORT_C TInt Kern::ShPoolClose(TShPool* aPool)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShPoolClose(%x)", aPool));

    TInt r = reinterpret_cast<DShPool*>(aPool)->Close(K::TheKernelProcess);

    __KTRACE_OPT(KMMU, Kern::Printf("<Kern::ShPoolClose(%x)", aPool));
    return r;
    }

/**
Creates a user process handle to a pool of buffers in the kernel address space

@param aPool   Pointer to the pool.
@param aThread Pointer to the user process's thread; if null the
               current thread is used.
@param aFlags  Handle flags (attributes)

@pre Calling thread must be in a critical section.

@return the handle if successful, otherwise one of the system-wide error codes.
*/
EXPORT_C TInt Kern::ShPoolMakeHandleAndOpen(TShPool* aPool, DThread* aThread, TUint aFlags)
    {
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Kern::ShPoolMakeHandleAndOpen");

    if (!aThread)
        aThread = TheCurrentThread;

    TInt h;
    TInt r = aThread->MakeHandleAndOpen(EOwnerProcess, reinterpret_cast<DShPool*>(aPool), h, aFlags);
    return (r == KErrNone) ? h : r;
    }

/**
Allocates a shared data buffer.

By default this method will return immediately with KErrNoMemory if no buffer is
available on the pool's free list, even if the pool could grow automatically.

By default it will also map the allocated buffer into the calling process's address space.

Setting EShPoolAllocCanWait in the flags indicates that the caller is prepared to
wait while the pool is grown if a buffer is not immediately available on the free list.

Setting EShPoolAllocNoMap in the flags indicates that the caller does not want the
buffer to be automatically mapped into its address space. This can improve performance
on buffers from page-aligned pools if the caller will not need to access the data in the
buffer (e.g. if it will just be passing it on to another component). This only prevents
mapping if the pool is set to not automatically map buffers into processes' address space.

@param aPool  Pointer to the pool
@param aBuf   Pointer to buffer
@param aFlags Bitwise OR of values from TShPoolAllocFlags to specify non-default behaviour.

@pre Calling thread must be in a critical section.

@return KErrNone if successful, otherwise one of the system-wide error codes.

@see TShPoolAllocFlags
*/
EXPORT_C TInt Kern::ShPoolAlloc(TShPool* aPool, TShBuf*& aBuf, TUint aFlags)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShPoolAlloc(%x)", aPool));
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Kern::ShPoolAlloc");

    aBuf = NULL;

    const TUint acceptableFlags = EShPoolAllocNoMap;

    TUint validatedFlags = aFlags & acceptableFlags;

    aFlags &= ~acceptableFlags;

    if (aFlags != 0)
        {
        Kern::Printf("Kern::ShPoolAlloc: flags 0x%x after 0x%x", aFlags, validatedFlags);
        return KErrArgument;
        }

    DShBuf* buf;
    TInt r = reinterpret_cast<DShPool*>(aPool)->Alloc(buf);
    if (r == KErrNone)
        {
        if (buf->iPool->iPoolFlags & EShPoolPageAlignedBuffer)
            {
            TUint attr = 0;
            if (validatedFlags & EShPoolAllocNoMap)
                {
                attr = EShPoolNoMapBuf;
                }

            r = buf->AddToProcess(K::TheKernelProcess, attr);
            }
        if (r == KErrNone)
            {
            aBuf = reinterpret_cast<TShBuf*>(buf);
            }
        else
            {
            buf->Close(NULL);
            }
        }

    __KTRACE_OPT(KMMU, Kern::Printf("<Kern::ShPoolAlloc(%x)", aPool));
    return r;
    }

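// Illustrative sketch (not part of the original source): allocating, filling
// and releasing a buffer from the kernel side, assuming 'pool' came from
// Kern::ShPoolCreate() or Kern::ShPoolOpen().
//
//     TShBuf* buf = NULL;
//     NKern::ThreadEnterCS();
//     TInt r = Kern::ShPoolAlloc(pool, buf, 0); // default: buffer is mapped
//     if (r == KErrNone)
//         {
//         TUint8* p = Kern::ShBufPtr(buf);
//         memset(p, 0, Kern::ShBufSize(buf));   // use the buffer
//         Kern::ShBufClose(buf);                // hand it back to the pool
//         }
//     NKern::ThreadLeaveCS();
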
/**
Retrieves information about the pool.

@param aPool      Pointer to the pool
@param aInfo[out] Returns a reference to pool info.
*/
EXPORT_C void Kern::ShPoolGetInfo(TShPool* aPool, TShPoolInfo& aInfo)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShPoolGetInfo(%x)", aPool));

    reinterpret_cast<DShPool*>(aPool)->GetInfo(aInfo);

    __KTRACE_OPT(KMMU, Kern::Printf("<Kern::ShPoolGetInfo(%x)", aPool));
    }

/**
@param aPool Pointer to the pool
@return the size of each buffer in the pool.
*/
EXPORT_C TUint Kern::ShPoolBufSize(TShPool* aPool)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShPoolBufSize(%x)", aPool));

    return reinterpret_cast<DShPool*>(aPool)->BufSize();
    }

/**
@param aPool Pointer to the pool
@return the number of free buffers in the pool
*/
EXPORT_C TUint Kern::ShPoolFreeCount(TShPool* aPool)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShPoolFreeCount(%x)", aPool));

    return reinterpret_cast<DShPool*>(aPool)->FreeCount();
    }

/**
Specifies how many buffers of a page-aligned pool this process will require
concurrent access to.

This determines how much of the kernel's address space will be allocated for
buffers of this pool.

If the pool and its corresponding buffers are not going to be mapped in the
kernel's address space then it is not necessary to call this API.

@param aPool       Pointer to the pool
@param aWindowSize Specifies the number of buffers to map into the process's
                   virtual address space
                   (-1 specifies that all buffers will be mapped).

@pre Calling thread must be in a critical section.
@pre The pool's buffers must be page-aligned.

@return KErrNone if successful, otherwise one of the system-wide error codes.
*/
EXPORT_C TInt Kern::ShPoolSetBufferWindow(TShPool* aPool, TInt aWindowSize)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShPoolSetBufferWindow(%x %d)", aPool, aWindowSize));
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Kern::ShPoolSetBufferWindow");
    TInt r = reinterpret_cast<DShPool*>(aPool)->SetBufferWindow(K::TheKernelProcess, aWindowSize);
    return r;
    }

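// Illustrative note (not part of the original source): a kernel-side client
// of a page-aligned pool typically sets the window before allocating, e.g.:
//
//     NKern::ThreadEnterCS();
//     TInt r = Kern::ShPoolSetBufferWindow(pool, -1); // map all buffers
//     NKern::ThreadLeaveCS();
//
// A window smaller than the pool saves kernel virtual address space at the
// cost of mapping and unmapping buffers as they cycle through the window.
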
/**
Opens a buffer in the kernel address space using a user process handle.

@param aBuf[out] Returns pointer to the buffer.
@param aThread   Pointer to the user process's thread; if null the
                 current thread is used.
@param aHandle   User process handle to open the buffer from

@pre Calling thread must be in a critical section.

@return KErrNone if successful, otherwise one of the system-wide error codes.
*/
EXPORT_C TInt Kern::ShBufOpen(TShBuf*& aBuf, DThread* aThread, TInt aHandle)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShBufOpen(0x%08x %d)", aThread, aHandle));
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Kern::ShBufOpen");

    TInt r = KErrNotFound;
    aBuf = NULL;

    NKern::LockSystem();
    DShBuf* buf = reinterpret_cast<DShBuf*>(aThread->ObjectFromHandle(aHandle, EShBuf));
    if (buf != NULL)
        {
        r = buf->Open();
        }

    NKern::UnlockSystem();

    if (r == KErrNone)
        {
        if (buf->iPool->iPoolFlags & EShPoolPageAlignedBuffer)
            {
            r = buf->AddToProcess(K::TheKernelProcess, 0);
            }
        if (r == KErrNone)
            {
            aBuf = reinterpret_cast<TShBuf*>(buf);
            }
        else
            {
            buf->Close(NULL);
            }
        }

    return r;
    }

/**
Pins a buffer, retrieving the physical addresses of the memory it occupies.

@param aBuf          Pointer to the buffer.
@param aPinObject    The physical pin mapping.
@param aReadOnly     Indicates whether memory should be pinned as read only.
@param aAddress[out] The value is the physical address of the first page
                     in the region.
@param aPages[out]   If not zero, this points to an array of TPhysAddr
                     objects. On success, this array will be filled
                     with the addresses of the physical pages which
                     contain the specified region. If aPages is
                     zero, then the function will fail with
                     KErrNotFound if the specified region is not
                     physically contiguous.
@param aMapAttr[out] Memory attributes defined by TMappingAttributes2.
@param aColour[out]  The mapping colour of the first physical page.

@pre Calling thread must be in a critical section.

@return KErrNone if successful, otherwise one of the system-wide error codes.
*/
EXPORT_C TInt Kern::ShBufPin(TShBuf* aBuf, TPhysicalPinObject* aPinObject, TBool aReadOnly, TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShBufPin(%x)", aBuf));
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Kern::ShBufPin");

    return reinterpret_cast<DShBuf*>(aBuf)->Pin(aPinObject, aReadOnly, aAddress, aPages, aMapAttr, aColour);
    }

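// Illustrative sketch (not part of the original source): pinning a buffer for
// DMA. Kern::CreatePhysicalPinObject() / Kern::DestroyPhysicalPinObject() are
// assumed to be available to the caller, as for other physical-pinning clients.
//
//     TPhysicalPinObject* pin = NULL;
//     TInt r = Kern::CreatePhysicalPinObject(pin);
//     if (r == KErrNone)
//         {
//         TPhysAddr base;
//         TUint32 mapAttr;
//         TUint colour;
//         // NULL page list: fails unless the buffer is physically contiguous
//         r = Kern::ShBufPin(buf, pin, ETrue, base, NULL, mapAttr, colour);
//         if (r == KErrNone)
//             {
//             // ... program the DMA controller with 'base' ...
//             }
//         Kern::DestroyPhysicalPinObject(pin);
//         }
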
/**
@param aBuf Pointer to the buffer.

@return the size of the buffer
*/
EXPORT_C TUint Kern::ShBufSize(TShBuf* aBuf)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShBufSize(%x)", aBuf));

    return reinterpret_cast<DShBuf*>(aBuf)->Size();
    }

/**
@param aBuf Pointer to the buffer.

@return A pointer to the start of the buffer.
*/
EXPORT_C TUint8* Kern::ShBufPtr(TShBuf* aBuf)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShBufPtr(%x)", aBuf));

    return reinterpret_cast<DShBuf*>(aBuf)->Base();
    }

/**
Closes a buffer in the kernel address space

@param aBuf Pointer to the buffer.

@return ETrue if the reference count of the buffer has gone to zero,
        otherwise one of the system-wide error codes.
*/
EXPORT_C TInt Kern::ShBufClose(TShBuf* aBuf)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">Kern::ShBufClose(%x)", aBuf));

    return reinterpret_cast<DShBuf*>(aBuf)->Close(K::TheKernelProcess);
    }

/**
Creates a user process handle to a buffer in the kernel address space

@param aBuf    Pointer to the buffer.
@param aThread Pointer to the user process's thread; if null the
               current thread is used.

@pre Calling thread must be in a critical section.

@return the handle if successful, otherwise one of the system-wide error codes.
*/
EXPORT_C TInt Kern::ShBufMakeHandleAndOpen(TShBuf* aBuf, DThread* aThread)
    {
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Kern::ShBufMakeHandleAndOpen");

    if (!aThread)
        aThread = TheCurrentThread;

    TUint attr = (TUint)RObjectIx::EReserved;

    TInt h;
    TInt r = aThread->MakeHandleAndOpen(EOwnerProcess, reinterpret_cast<DShBuf*>(aBuf), h, attr);
    return (r == KErrNone) ? h : r;
    }

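// Illustrative sketch (not part of the original source): a driver handing a
// kernel-allocated buffer to its user-side client. 'iClientThread' and the
// way the handle value travels back to the client are assumptions.
//
//     TInt h = Kern::ShBufMakeHandleAndOpen(buf, iClientThread);
//     if (h > 0)
//         {
//         // return 'h' to the client (e.g. via a request argument); the
//         // user side then adopts it as an RShBuf handle.
//         }
//     else
//         {
//         // h holds one of the system-wide error codes
//         }
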
// DShBuf implementation
DShBuf::DShBuf(DShPool* aPool, TLinAddr aRelAddr) : iPool(aPool), iRelAddress(aRelAddr)
    {
    }

TInt DShBuf::Construct()
    {
    return K::AddObject(this, EShBuf);
    }

DShBuf::DShBuf(DShPool* aPool) : iPool(aPool), iRelAddress(0)
    {
    }

DShBuf::~DShBuf()
    {
    }

TInt DShBuf::RequestUserHandle(DThread* __DEBUG_ONLY(aThread), TOwnerType aType, TUint aAttr)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShBuf::RequestUserHandle (aThread = 0x%08x, aType = 0x%08x)", aThread, aType));

    if (aType != EOwnerProcess)
        {
        __KTRACE_OPT(KFAIL, Kern::Printf("Tried to create thread handle to DShBuf"));
        return KErrNotSupported;
        }

    if ((aAttr & RObjectIx::EReserved) != RObjectIx::EReserved)
        {
        return KErrNotSupported;
        }

    return KErrNone;
    }

TInt DShBuf::Close(TAny* __DEBUG_ONLY(aPtr))
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShBuf::Close (0x%08x)", aPtr));

    if (AccessCount() == 1)
        {
        __KTRACE_OPT(KMMU, Kern::Printf("Closing DShBuf"));
        iPool->Free(this); // put back on free list
        }
    else
        {
        Dec();
        }

    return KErrNone;
    }

TInt DShBuf::Pin(TPhysicalPinObject* /* aPinObject */, TBool /* aReadOnly */, TPhysAddr& /* aAddress */, TPhysAddr* /* aPages */, TUint32& /* aMapAttr */, TUint& /* aColour */)
    {
    return KErrNone;
    }

// DShPool implementation

DShPool::DShPool() : iNotifDfc(ManagementDfc, this, 3)
    {}

DShPool::~DShPool()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::~DShPool"));

    if (iProcessLock)
        iProcessLock->Close(NULL);

    delete iClientMap;

    __KTRACE_OPT(KMMU, Kern::Printf("<DShPool::~DShPool"));
    }

TInt DShPool::Create(DObject* aOwner, TShPoolCreateInfo& aInfo)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::Create (iBufSize = 0x%08x, iInitialBufs = %d, iMaxBufs = %d, iFlags = 0x%08x, iPhysAddr.iPhysAddrList = 0x%08x, iAlignment = %d)", aInfo.iInfo.iBufSize, aInfo.iInfo.iInitialBufs, aInfo.iInfo.iMaxBufs, aInfo.iInfo.iFlags, aInfo.iPhysAddr.iPhysAddrList, aInfo.iInfo.iAlignment));

    TInt r = SetOwner(aOwner);
    if (r != KErrNone)
        return r;

    TUint32 pageSize = Kern::RoundToPageSize(1);
    TUint32 pageShift = __e32_find_ms1_32(pageSize);

    // Sanity-check arguments:
    // Don't allow buffer size, growth threshold or alignment <= 0.
    // Alignment needs to be between 0 and 12 (don't allow > one page).
    // We will force alignment to be at least 4 bytes (others have suggested 64 bytes, cache line size).
    // We also require that there are an exact number of iBufSize's in iInitialBufs.
    // For EDevice pools, buffer size must be a multiple of alignment, and iGrowRatio must be 0.
    //

    // Remember buffer attributes
    iBufSize = aInfo.iInfo.iBufSize;
    iGrowTriggerRatio = aInfo.iInfo.iGrowTriggerRatio;
    iGrowByRatio = aInfo.iInfo.iGrowByRatio;
    iShrinkHysteresisRatio = aInfo.iInfo.iShrinkHysteresisRatio;
    iInitialBuffers = aInfo.iInfo.iInitialBufs;
    iMaxBuffers = aInfo.iInfo.iMaxBufs;
    iPoolFlags = aInfo.iInfo.iFlags;

    // No automatic growing and shrinking if the pool is already at its maximum size
    // or if either grow ratio is zero.
    if (iInitialBuffers == iMaxBuffers || iGrowTriggerRatio == 0 || iGrowByRatio == 0)
        {
        iGrowTriggerRatio = aInfo.iInfo.iGrowTriggerRatio = 0;
        iGrowByRatio = aInfo.iInfo.iGrowByRatio = 0;
        }
    else
        {
        // Arbitrarily cap iGrowByRatio at something large (later on we will divide by
        // iGrowByRatio + 256, so that sum must not end up as 0x100000000).
        // iGrowTriggerRatio must represent a number < 1, or the pool will grow immediately.
        if (iGrowTriggerRatio > 256 || iGrowByRatio > (TUint)KMaxTInt32)
            return KErrArgument;

        // If growing or shrinking, hysteresis must be >= 1. Also cap arbitrarily.
        // (1.0 as fx24.8 == 256)
        if (iShrinkHysteresisRatio < 256 || iShrinkHysteresisRatio > (TUint)KMaxTInt32)
            return KErrArgument;
        }

    if (iPoolFlags & EShPoolPageAlignedBuffer)
        {
        iAlignment = aInfo.iInfo.iAlignment = pageShift;
        }
    else
        {
        // How we're going to cut the buffer up
        iAlignment = aInfo.iInfo.iAlignment;

        // Ensure buffers will be aligned on cache line boundaries, so that DMA
        // will work properly.
        TUint minAlignment = __e32_find_ms1_32(Cache::DmaBufferAlignment());
        if (minAlignment < 5) // Absolute minimum 32-byte alignment
            minAlignment = 5;
        if (minAlignment > pageShift) // Absolute maximum page alignment
            minAlignment = pageShift;

        if (iAlignment < minAlignment) iAlignment = minAlignment;

        // Can't have exclusive access on a non-page-aligned pool.
        if (iPoolFlags & EShPoolExclusiveAccess)
            return KErrArgument;
        }

    // XXX implementation of exclusive access is Phase 2.
    if (iPoolFlags & EShPoolExclusiveAccess)
        return KErrNotSupported;

    iBufGap = (iBufSize + (1 << iAlignment) - 1) & ~((1 << iAlignment) - 1);

    if (iPoolFlags & EShPoolGuardPages)
        {
        // must be aligned
        if ((iPoolFlags & EShPoolPageAlignedBuffer) == 0)
            return KErrArgument;
        iBufGap += pageSize;
        }

    // Checks that are valid for both ERAM and EDevice
    if ((iMaxBuffers == 0) || (iBufSize == 0) || (iBufSize > (1 << 30)) ||
        (aInfo.iInfo.iAlignment > pageShift))
        {
        return KErrArgument;
        }

    if (iPoolFlags & EShPoolPhysicalMemoryPool)
        {
        // Checks that are only valid for EDevice
        if (aInfo.iPhysAddr.iPhysAddrList == 0 || iGrowTriggerRatio != 0 || iGrowByRatio != 0 ||
            iBufSize > iBufGap || iMaxBuffers != iInitialBuffers)
            {
            return KErrArgument;
            }

        // check there are enough pages to fit all the buffers
        iCommittedPages = ((iInitialBuffers * iBufGap) + pageSize - 1) >> pageShift;

        if (iCommittedPages > aInfo.iPages)
            return KErrArgument;

        iCommittedPages = aInfo.iPages;
        }
    else
        {
        // Checks that are only valid for ERAM
        if (aInfo.iPhysAddr.iPhysAddrList != 0 ||
            iInitialBuffers > iMaxBuffers)
            {
            return KErrArgument;
            }
        if ((iGrowTriggerRatio == 0 || iGrowByRatio == 0) &&
            (iInitialBuffers != iMaxBuffers))
            {
            return KErrArgument;
            }
        if ((iGrowTriggerRatio != 0 || iGrowByRatio != 0) &&
            (iPoolFlags & EShPoolNonPageAlignedBuffer) &&
            (iPoolFlags & EShPoolContiguous))
            {
            return KErrArgument;
            }
        }

    r = Kern::MutexCreate(iProcessLock, _L("ShPoolProcessLock"), KMutexOrdShPool);

    if (r == KErrNone)
        r = DoCreate(aInfo);

    if (r == KErrNone)
        r = CreateInitialBuffers();

    if (r == KErrNone)
        {
        iClientMap = new SMap(&iLock, iProcessLock);
        if (iClientMap == NULL)
            r = KErrNoMemory;
        }

    // Pre-calculate the shrink-by ratio as 1 - 1 / (1 + G), where
    // G is the grow-by ratio. The value of iGrowByRatio is capped above,
    // so we know we won't be dividing by zero.
    if (r == KErrNone)
        {
        iShrinkByRatio = 256 - 65536 / (256 + iGrowByRatio);

        CalculateGrowShrinkTriggers();

        iFreeSpaceThreshold = iMaxBuffers + 1; // make sure this won't fire just yet

        iNotifDfc.SetDfcQ(&iSharedDfcQue);

        r = K::AddObject(this, EShPool);
        }

    __KTRACE_OPT(KMMU, Kern::Printf("<DShPool::Create returns %d", r));
    return r;
    }

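// Worked example (not part of the original source) of the iBufGap rounding
// above: with iBufSize = 600 and iAlignment = 6 (64-byte cache lines),
// iBufGap = (600 + 63) & ~63 = 640, so buffers start every 640 bytes and
// 40 bytes per buffer are lost to alignment padding. With EShPoolGuardPages
// on a page-aligned pool (4KB pages), one guard page is added per buffer,
// so a 4096-byte buffer occupies 8192 bytes of address space.
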
TInt DShPool::Close(TAny* aPtr)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::Close (0x%08x)", aPtr));

    if (AccessCount() > 1)
        {
        Dec();
        return KErrNone;
        }

    CompleteAllNotifications();

    DeleteInitialBuffers();

    SDblQueLink* pLink;
    // free up any remaining buffers on the free list; these must have been
    // dynamically allocated, because we already deleted the initial buffers
    while (!iFreeList.IsEmpty())
        {
        pLink = iFreeList.GetFirst();
        DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
        buf->DObject::Close(NULL);
        }

    // likewise for any remaining buffers on the alternate free list
    while (!iAltFreeList.IsEmpty())
        {
        pLink = iAltFreeList.GetFirst();
        DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
        buf->DObject::Close(NULL);
        }

    // call base class

    return DObject::Close(aPtr);
    }

void DShPool::GetInfo(TShPoolInfo& aInfo)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::GetInfo"));
    aInfo.iBufSize = iBufSize;
    aInfo.iInitialBufs = iInitialBuffers;
    aInfo.iMaxBufs = iMaxBuffers;
    aInfo.iGrowTriggerRatio = iGrowTriggerRatio;
    aInfo.iGrowByRatio = iGrowByRatio;
    aInfo.iShrinkHysteresisRatio = iShrinkHysteresisRatio;
    aInfo.iAlignment = iAlignment;
    aInfo.iFlags = iPoolFlags;
    }

// This method is called after we grow or shrink the pool. It re-calculates
// the actual numbers of trigger buffers for growing and shrinking based on
// the fx24.8 ratios.
//
// The triggers are set so that they can be blindly compared against, even if
// no automatic growing or shrinking is happening.
//
void DShPool::CalculateGrowShrinkTriggers()
    {
    LockPool();

    // If the pool is at its maximum size, we can't grow
    if (iTotalBuffers >= iMaxBuffers || iGrowTriggerRatio == 0)
        {
        iGrowTrigger = 0;
        }
    else
        {
        iGrowTrigger = mult_fx248(iTotalBuffers, iGrowTriggerRatio);

        // Deal with rounding towards zero
        if (iGrowTrigger == 0)
            iGrowTrigger = 1;
        }

    // If no growing has happened, we can't shrink
    if (iTotalBuffers <= iInitialBuffers || iGrowTriggerRatio == 0 || (iPoolFlags & EShPoolSuppressShrink) != 0)
        {
        iShrinkTrigger = iMaxBuffers;
        }
    else
        {
        // To ensure that shrinking doesn't immediately happen after growing, the trigger
        // amount is the grow trigger + the grow amount (which is the number of free buffers
        // just after a grow) times the shrink hysteresis value.
        iShrinkTrigger = mult_fx248(iTotalBuffers, iGrowTriggerRatio + iGrowByRatio);
        iShrinkTrigger = mult_fx248(iShrinkTrigger, iShrinkHysteresisRatio);

        // Deal with rounding towards zero
        if (iShrinkTrigger == 0)
            iShrinkTrigger = 1;

        // If the shrink trigger ends up > the number of buffers currently in
        // the pool, set it to that number (less 1, since the test is "> trigger").
        // This means the pool will only shrink when all the buffers have been freed.
        if (iShrinkTrigger >= iTotalBuffers)
            iShrinkTrigger = iTotalBuffers - 1;
        }

    UnlockPool();
    }

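// Worked example (not part of the original source): with iTotalBuffers = 40,
// iGrowTriggerRatio = 26 (~0.10), iGrowByRatio = 64 (0.25) and
// iShrinkHysteresisRatio = 332 (~1.30):
//
//     iGrowTrigger   = mult_fx248(40, 26)      = (40 * 26) >> 8  = 4
//     iShrinkTrigger = mult_fx248(40, 26 + 64) = (40 * 90) >> 8  = 14
//     iShrinkTrigger = mult_fx248(14, 332)     = (14 * 332) >> 8 = 18
//
// so the pool grows when fewer than 4 buffers are free and shrinks when
// more than 18 are free.
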
// Multiplies an unsigned integer by an fx24.8 fixed-point value.
// Returns the value, or 0xFFFFFFFF if there was overflow.
//
TUint DShPool::mult_fx248(TUint n, TUint f)
    {
    TUint64 r = (TUint64) n * f;

    I64LSR(r, 8);

    return r > KMaxTUint32 ? KMaxTUint32 : I64LOW(r);
    }

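// Illustrative note (not part of the original source): fx24.8 means 24 bits
// of integer and 8 bits of fraction, so f = 256 represents 1.0. For example,
// mult_fx248(100, 384) computes (100 * 384) >> 8 = 150, i.e. 100 * 1.5.
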
TInt DShPool::CreateInitialBuffers()
    {
    // This virtual must always be implemented in the derived class
    K::Fault(K::EShBufVirtualNotDefined);

    return KErrNone;
    }

TInt DShPool::DeleteInitialBuffers()
    {
    // This virtual must always be implemented in the derived class
    K::Fault(K::EShBufVirtualNotDefined);

    return KErrNone;
    }

TUint DShPool::FreeCount()
    {
    return iFreeBuffers;
    }

void DShPool::ManagementDfc(TAny* aPool)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::ManagementDfc (aPool = 0x%08x)", aPool));

    DShPool* pool = static_cast<DShPool*>(aPool);

    // DFC to auto-allocate and complete space notifications.
    // It is queued as a result of alloc'ing and freeing.
    // It looks at the difference between total committed memory and used committed
    // memory, and may commit more memory (up to the maximum pool size); it then
    // calls CheckAndCompleteNotifications to complete any pending free space
    // notifications. Even when it does not commit memory it must call
    // CheckAndCompleteNotifications with the difference above, as it is also
    // queued from Free and should notify clients that enough space became free.
    pool->UpdateFreeList();

    pool->LockPool();
    if (pool->iFreeBuffers < pool->iGrowTrigger) // do not use <=, since iGrowTrigger of 0
        { // => no growing.
        pool->UnlockPool();
        __KTRACE_OPT(KMMU, Kern::Printf("GrowPool() because free %d < grow trigger %d", pool->iFreeBuffers, pool->iGrowTrigger));
        pool->GrowPool();
        }
    else if (pool->iFreeBuffers > pool->iShrinkTrigger) // do not use >=, since iShrinkTrigger of 0
        { // => no shrinking.
        pool->UnlockPool();
        __KTRACE_OPT(KMMU, Kern::Printf("ShrinkPool() because free %d > shrink trigger %d", pool->iFreeBuffers, pool->iShrinkTrigger));
        pool->ShrinkPool();
        }
    else
        {
        pool->UnlockPool();
        }

    pool->CheckAndCompleteNotifications(EFalse);

    // We might be able to grow/shrink some more. Give the Management DFC another kick if necessary.
    if (pool->HaveWorkToDo())
        {
        pool->KickManagementDfc();
        }

    pool->Close(NULL);
    __KTRACE_OPT(KMMU, Kern::Printf("<DShPool::ManagementDfc (aPool = 0x%08x)", aPool));
    }

// Forward declaration needed by TShPoolNotificationCleanup
class TShPoolNotificationRequest;

/**
@internalComponent
@prototype
*/
class TShPoolNotificationCleanup : public TThreadCleanup
    {
public:
    virtual void Cleanup();
    TShPoolNotificationRequest& NotificationRequest();
    };

/**
@internalComponent
@prototype
*/
class TShPoolNotificationRequest : public TClientRequest
// notification requests need to be queued
    {
public:
    TShPoolNotificationRequest(TShPoolNotifyType aType, TUint aThreshold, DThread* aThread, TRequestStatus* aStatus);
    void Complete(TInt aReason);
    void AddCleanup();
    void RemoveCleanup();

public:
    TShPoolNotifyType iNotificationType;
    TUint iThreshold; // Our key, used to keep this queue in ascending order. When growing the
                      // pool we follow the list down to the appropriate threshold and complete
                      // requests accordingly.
    SDblQueLink iObjLink;
    DShPool* iPool; // Pointer to the pool; not reference counted. Cleared when the notification
                    // is removed from the pool, which always happens before pool deletion.
    DThread* iOwningThread;
    TShPoolNotificationCleanup iCleanup;

    static NFastMutex ShPoolNotifierLock; // fast mutex to protect notifier list

    inline static void Lock()
        { NKern::FMWait(&ShPoolNotifierLock); }
    inline static void Unlock()
        { NKern::FMSignal(&ShPoolNotifierLock); }
    };

NFastMutex TShPoolNotificationRequest::ShPoolNotifierLock; // fast mutex to protect notifier list

inline TShPoolNotificationRequest& TShPoolNotificationCleanup::NotificationRequest()
    { return *_LOFF(this, TShPoolNotificationRequest, iCleanup); }

TShPoolNotificationRequest::TShPoolNotificationRequest(TShPoolNotifyType aType, TUint aThreshold, DThread* aThread, TRequestStatus* aStatus)
    : TClientRequest()
    {
    iNotificationType = aType;
    iThreshold = aThreshold;
    iObjLink.iNext = NULL;
    iPool = NULL;
    iOwningThread = aThread;
    SetStatus(aStatus);
    }

void TShPoolNotificationRequest::Complete(TInt aReason)
    {
    NKern::LockSystem();
    RemoveCleanup();
    NKern::UnlockSystem();

    Kern::QueueRequestComplete(iOwningThread, this, aReason);
    DThread* pT = iOwningThread;
    Close();
    pT->Close(NULL);
    }

void TShPoolNotificationRequest::AddCleanup()
    {
    NKern::LockSystem();
    TheCurrentThread->AddCleanup(&iCleanup);
    NKern::UnlockSystem();
    }

void TShPoolNotificationRequest::RemoveCleanup()
    {
    if (iCleanup.iThread != NULL)
        {
        iCleanup.Remove();
        iCleanup.iThread = NULL;
        }
    }

// Called when the thread that requested a notification exits.
// Called in the context of the exiting thread, with the system locked.
void TShPoolNotificationCleanup::Cleanup()
    {
    __ASSERT_SYSTEM_LOCK;
    TShPoolNotificationRequest& req = NotificationRequest();
    Remove(); // take this cleanup item off the thread cleanup list
    iThread = NULL;
    NKern::UnlockSystem();
    // Notifier may still be on the pool notifier list

    TShPoolNotificationRequest::Lock();
    if (req.iObjLink.iNext)
        {
        DShPool* pool = req.iPool;
        SDblQueLink* anchor = NULL;
        switch (req.iNotificationType)
            {
            case EShPoolLowSpace:
                anchor = &pool->iNotifLowReqQueue.iA;
                break;
            case EShPoolFreeSpace:
                anchor = &pool->iNotifFreeReqQueue.iA;
                break;
            }

        req.iObjLink.Deque();
        req.iObjLink.iNext = NULL;

        // We "should not" have anything other than LowSpace or FreeSpace
        // notification objects.
        __NK_ASSERT_DEBUG(anchor != NULL);

        TBool empty = (anchor->iNext == anchor);
        switch (req.iNotificationType)
            {
            case EShPoolLowSpace:
                pool->iLowSpaceThreshold = empty ? 0 : _LOFF(anchor->iNext, TShPoolNotificationRequest, iObjLink)->iThreshold + 1;
                break;
            case EShPoolFreeSpace:
                pool->iFreeSpaceThreshold = empty ? pool->iMaxBuffers + 1 : _LOFF(anchor->iNext, TShPoolNotificationRequest, iObjLink)->iThreshold;
                break;
            }

        TShPoolNotificationRequest::Unlock();

        DThread* pT = req.iOwningThread;
        req.iOwningThread = NULL;

        req.Close();

        // If the notification code isn't looking at this request, close the owning thread.
        // This balances the Open() in AddNotification()
        if (pT)
            pT->Close(NULL);
        }
    else
        {
        TShPoolNotificationRequest::Unlock();
        }

    NKern::LockSystem();
    }

TInt DShPool::AddNotification(TShPoolNotifyType aType, TUint aThreshold, TRequestStatus& aStatus)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::AddNotification(%d, %d)", aType, aThreshold));

    DThread* pT = TheCurrentThread; // This might involve calling a function, so only do it once

    TShPoolNotificationRequest* req;
    switch (aType)
        {
        case EShPoolLowSpace:
            req = new TShPoolNotificationRequest(aType, aThreshold, pT, &aStatus);
            break;
        case EShPoolFreeSpace:
            req = new TShPoolNotificationRequest(aType, aThreshold, pT, &aStatus);
            break;
        default:
            return KErrArgument;
        }

    if (req == NULL)
        return KErrNoMemory;

    if ((aType == EShPoolLowSpace && iFreeBuffers <= req->iThreshold) ||
        (aType == EShPoolFreeSpace && iFreeBuffers > req->iThreshold))
        {
        // Complete immediately
        Kern::QueueRequestComplete(pT, req, KErrNone);
        req->Close();
        }
    else
        {
        // Add the notifier to the thread before adding it to the pool: the thread can't die
        // meanwhile, but the notifier could complete as soon as we release ShPoolNotifierLock
        // after adding it to the pool.
        pT->Open();
        req->AddCleanup();
        TShPoolNotificationRequest::Lock();
        req->iPool = this; // for QueueAnchor

        // add this request to its queue of requests
        SDblQueLink* anchor;
        SDblQueLink* pLink;
        switch (aType)
            {
            case EShPoolLowSpace:
                anchor = &iNotifLowReqQueue.iA;

                for (pLink = anchor->iNext;
                     pLink != anchor && _LOFF(pLink, TShPoolNotificationRequest, iObjLink)->iThreshold >= aThreshold;
                     pLink = pLink->iNext)
                    {} /* nothing */

                // Insert before the first entry with a strictly lower threshold;
                // if there is no such entry, inserting before the anchor makes it last in the list
                req->iObjLink.InsertBefore(pLink);

                // Remember the threshold of the first notification on the list (will be the highest).
                iLowSpaceThreshold = _LOFF(anchor->iNext, TShPoolNotificationRequest, iObjLink)->iThreshold + 1;
                break;

            case EShPoolFreeSpace:
                anchor = &iNotifFreeReqQueue.iA;

                for (pLink = anchor->iNext;
                     pLink != anchor && _LOFF(pLink, TShPoolNotificationRequest, iObjLink)->iThreshold < aThreshold;
                     pLink = pLink->iNext)
                    {} /* nothing */

                // Insert before the first entry with a strictly higher threshold;
                // if there is no such entry, inserting before the anchor makes it last in the list
                req->iObjLink.InsertBefore(pLink);

                // Remember the threshold of the first notification on the list (will be the lowest).
                iFreeSpaceThreshold = _LOFF(anchor->iNext, TShPoolNotificationRequest, iObjLink)->iThreshold;
                break;
            }

        TShPoolNotificationRequest::Unlock();

        // Care required here: the new notifier could be completed right here
        }

    // Queue the ManagementDfc, which completes notifications as appropriate
    if (HaveWorkToDo())
        KickManagementDfc();

    return KErrNone;
    }

1528 |
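// Worked example of the ordering invariant maintained above (a hypothetical
// state, for illustration): with low-space waiters at thresholds 8, 5 and 2
// the low queue is kept descending (8, 5, 2) and iLowSpaceThreshold is cached
// as 8 + 1 = 9, so the single comparison iFreeBuffers < 9 tells HaveWorkToDo()
// whether any low-space waiter can fire. The free queue is kept ascending and
// iFreeSpaceThreshold caches the smallest threshold, with no +1.
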
void DShPool::KickManagementDfc()
    {
    Open();
    if (!iNotifDfc.Enque())
        Close(NULL);
    }

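// Note on the Open()/Enque()/Close() sequence above: the Open() takes a
// reference so the pool stays alive while the DFC is pending; TDfc::Enque()
// returns false if the DFC was already queued, in which case the surplus
// reference is dropped again immediately. The DFC routine is then responsible
// for balancing the Open() when it runs.
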
TBool DShPool::HaveWorkToDo()
    {
    // Check against the cached thresholds only; we must not walk the notifier
    // lists here since we don't hold any locks.

    // Few enough free buffers that at least one low-space notifier would trigger?
    // iLowSpaceThreshold == 0 means no low-space waiters; otherwise it is the
    // highest waiter's threshold + 1.
    if (iFreeBuffers < iLowSpaceThreshold)
        return ETrue;

    // iFreeSpaceThreshold is the lowest waiter's threshold itself (no +1 here);
    // iMaxBuffers + 1 means no free-space waiters.
    if (iFreeBuffers >= iFreeSpaceThreshold)
        return ETrue;

    if (iFreeBuffers < iGrowTrigger)        // do not use <=, it will break things
        return ETrue;

    if (iFreeBuffers > iShrinkTrigger)      // do not use >=, it will break things
        return ETrue;

    return EFalse;
    }

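// Numeric sketch of the cached thresholds above (hypothetical values): with a
// low-space waiter at threshold 2, iLowSpaceThreshold is 3 and iFreeBuffers == 2
// triggers work; with a free-space waiter at threshold 8, iFreeSpaceThreshold
// is 8 and iFreeBuffers == 8 triggers work. With no waiters the cached values
// are 0 and iMaxBuffers + 1 respectively, which make both comparisons always
// false.
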
TInt DShPool::RemoveNotification(TShPoolNotifyType aType, TRequestStatus& aStatus)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::RemoveNotification"));

    SDblQueLink* anchor = NULL;
    switch (aType)
        {
    case EShPoolLowSpace:
        anchor = &iNotifLowReqQueue.iA;
        break;
    case EShPoolFreeSpace:
        anchor = &iNotifFreeReqQueue.iA;
        break;
        }

    if (anchor == NULL)
        return KErrArgument;

    DThread* pT = TheCurrentThread;     // This might involve calling a function, so only do it once
    SDblQueLink* pLink;

    TShPoolNotificationRequest* req = NULL;
    TShPoolNotificationRequest::Lock();
    for (pLink = anchor->iNext; pLink != anchor; pLink = pLink->iNext)
        {
        req = _LOFF(pLink, TShPoolNotificationRequest, iObjLink);
        if ((req->iStatus == ((T_UintPtr)&aStatus & ~KClientRequestFlagMask))
            && (req->iOwningThread == pT))
            break;
        }

    if (pLink == anchor)
        {
        // Not found on the list. Ah well.
        TShPoolNotificationRequest::Unlock();
        return KErrNotFound;
        }

    TBool first = (pLink == anchor->iNext);
    TBool last = (pLink == anchor->iPrev);
    pLink->Deque();
    pLink->iNext = NULL;

    // The notifier can no longer be completed, and since its owning thread is the current
    // thread we are now home and dry: no-one else can touch this notifier.
    if (first)
        {
        switch (aType)
            {
        case EShPoolLowSpace:
            // This was first on the list, so adjust iLowSpaceThreshold
            iLowSpaceThreshold = last ? 0 : _LOFF(anchor->iNext, TShPoolNotificationRequest, iObjLink)->iThreshold + 1;
            break;
        case EShPoolFreeSpace:
            // This was first on the list, so adjust iFreeSpaceThreshold
            iFreeSpaceThreshold = last ? iMaxBuffers + 1 : _LOFF(anchor->iNext, TShPoolNotificationRequest, iObjLink)->iThreshold;
            break;
            }
        }

    TShPoolNotificationRequest::Unlock();
    req->Complete(KErrCancel);
    return KErrNone;    // removed successfully
    }

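// Cancellation sketch: a request must be cancelled from the thread that armed
// it, which is why the lookup above matches on iOwningThread as well as the
// status pointer. Assuming the user-side RShPool API (an assumption, as above):
//
//  pool.CancelLowSpaceNotification(low);   // reaches RemoveNotification(); 'low' completes with KErrCancel
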
void DShPool::CompleteAllNotifications()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::CompleteAllNotifications"));

    // Cancel all outstanding notifications on pool close
    CheckAndCompleteNotifications(ETrue);
    }

void DShPool::CheckAndCompleteNotifications(TBool aAll)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::CheckAndCompleteNotifications"));

    CheckLowSpaceNotificationQueue(aAll);
    CheckFreeSpaceNotificationQueue(aAll);
    }

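// CheckAndCompleteNotifications(EFalse) is the management DFC's normal path:
// only notifiers whose thresholds are satisfied by the current iFreeBuffers
// are completed, with KErrNone. CompleteAllNotifications() passes ETrue on
// pool close, draining both queues unconditionally with KErrCancel.
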
void DShPool::CheckLowSpaceNotificationQueue(TBool aAll)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::CheckLowSpaceNotificationQueue"));
    // Walk the ordered list and complete every notification satisfied by the
    // current free-buffer count (or every notification, if aAll is set)
    TInt result = aAll ? KErrCancel : KErrNone;

    SDblQueLink* anchor = &iNotifLowReqQueue.iA;

    TShPoolNotificationRequest::Lock();

    while (aAll || iFreeBuffers < iLowSpaceThreshold)
        {
        // Take the first notifier off the list and complete it, provided its
        // owning thread has not terminated
        SDblQueLink* pLink = anchor->iNext;
        if (pLink == anchor)
            break;

        TBool last = (pLink == anchor->iPrev);
        iLowSpaceThreshold = last ? 0 : _LOFF(pLink->iNext, TShPoolNotificationRequest, iObjLink)->iThreshold + 1;

        pLink->Deque();
        pLink->iNext = NULL;

        // The notifier has been detached from the pool's notifier list but is still attached
        // to its owning thread. As soon as we signal ShPoolNotifierLock the notifier may be
        // zapped by the owning thread's cleanup code.
        TShPoolNotificationRequest* req = _LOFF(pLink, TShPoolNotificationRequest, iObjLink);
        TShPoolNotificationRequest::Unlock();

        // The owning thread is still alive: detach the notifier from it and complete it
        req->Complete(result);
        TShPoolNotificationRequest::Lock();
        }

    TShPoolNotificationRequest::Unlock();
    }

void DShPool::CheckFreeSpaceNotificationQueue(TBool aAll)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::CheckFreeSpaceNotificationQueue"));
    // Walk the ordered list and complete every notification satisfied by the
    // current free-buffer count (or every notification, if aAll is set)
    TInt result = aAll ? KErrCancel : KErrNone;

    SDblQueLink* anchor = &iNotifFreeReqQueue.iA;

    TShPoolNotificationRequest::Lock();

    while (aAll || iFreeBuffers >= iFreeSpaceThreshold)
        {
        // Take the first notifier off the list and complete it, provided its
        // owning thread has not terminated
        SDblQueLink* pLink = anchor->iNext;
        if (pLink == anchor)
            break;

        TBool last = (pLink == anchor->iPrev);
        iFreeSpaceThreshold = last ? iMaxBuffers + 1 : _LOFF(pLink->iNext, TShPoolNotificationRequest, iObjLink)->iThreshold;

        pLink->Deque();
        pLink->iNext = NULL;

        // The notifier has been detached from the pool's notifier list but is still attached
        // to its owning thread. As soon as we signal ShPoolNotifierLock the notifier may be
        // zapped by the owning thread's cleanup code.
        TShPoolNotificationRequest* req = _LOFF(pLink, TShPoolNotificationRequest, iObjLink);
        TShPoolNotificationRequest::Unlock();

        // The owning thread is still alive: detach the notifier from it and complete it
        req->Complete(result);
        TShPoolNotificationRequest::Lock();
        }

    TShPoolNotificationRequest::Unlock();
    }

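// Both queue walkers above use the same hand-over-hand locking pattern: a
// notifier is dequeued and the cached threshold updated under
// ShPoolNotifierLock, but Complete() runs with the lock released, since
// completing a request interacts with the owning thread. Each iteration then
// re-acquires the lock and re-reads the head of the queue, which stays correct
// because the lists are ordered and the cached threshold was already updated
// before the lock was dropped.
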
TInt DShPool::OpenClient(DProcess* aProcess, TUint& aFlags)
    {
    LockPool();
    DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));

    if ((client != NULL) && client->iAccessCount)
        {
        // access count must be non-zero, otherwise the pool is in the process of being closed
        aFlags = client->iFlags;
        client->iAccessCount++;
        }
    else
        {
        UnlockPool();
        aFlags = 0;
        return KErrNotFound;
        }

    UnlockPool();
    return KErrNone;
    }

void DShPool::CloseClient(DProcess* aProcess)
    {
    LockPool();
    DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));

    if (client == NULL)
        {
        UnlockPool();
        return;
        }

    TInt r = --client->iAccessCount;

    UnlockPool();

    if (r == 0)
        {
        // check that it has not been reopened by another thread in the same process
        Kern::MutexWait(*iProcessLock);
        if (r == client->iAccessCount)
            {
            DestroyClientResources(aProcess);
            }
        Kern::MutexSignal(*iProcessLock);
        }
    }

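// Client bookkeeping sketch for OpenClient()/CloseClient(): each DProcess in
// iClientMap owns a DShPoolClient whose iAccessCount counts that process's
// references to the pool. The final CloseClient() re-checks the count under
// iProcessLock because another thread in the same process may have reopened
// the pool between the decrement and the mutex acquisition; per-process
// resources are destroyed only if the count is still zero at that point.
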
TInt DShPool::DoCreate(TShPoolCreateInfo& /*aInfo*/)
    {
    // This virtual must always be implemented in the derived class
    K::Fault(K::EShBufVirtualNotDefined);

    return KErrNotSupported;
    }

TInt DShPool::RequestUserHandle(DThread* __DEBUG_ONLY(aThread), TOwnerType aType)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::RequestUserHandle (aThread = 0x%08x, aType = 0x%08x)", aThread, aType));

    if (aType != EOwnerProcess)
        {
        __KTRACE_OPT(KFAIL, Kern::Printf("Tried to create thread handle to DShPool"));
        return KErrNotSupported;
        }

    return KErrNone;
    }

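// Note: pool handles are process-relative by design, so EOwnerThread requests
// are refused above; a thread-owned handle could otherwise disappear with its
// thread while other threads in the same process were still using the pool's
// buffers.
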
TInt DShPool::UpdateReservedHandles(TInt aNoOfBuffers)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DShPool::UpdateReservedHandles(%d)", aNoOfBuffers));

    TInt ret = KErrNone;

    // reserve handles for every interested process
    Kern::MutexWait(*iProcessLock);
    SMap::TIterator iter(*iClientMap);

    SMap::TEntry* entry;
    SMap::TEntry* lastEntry = NULL;

    while ((entry = iter.Next()) != lastEntry)
        {
        DProcess* pP = reinterpret_cast<DProcess*>(entry->iKey);

        if (pP)
            {
            TInt r = pP->iHandles.Reserve(aNoOfBuffers);

            // a reservation failed: back out what we reserved so far as best we can,
            // by restarting the walk with a negated count and stopping at this entry
            if (r != KErrNone && ret == KErrNone)
                {
                iter.Reset();
                lastEntry = entry;
                aNoOfBuffers = -aNoOfBuffers;
                ret = r;
                }
            }
        }

    Kern::MutexSignal(*iProcessLock);

    return ret;
    }

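// Rollback illustration for the loop above (a hypothetical failure): if
// Reserve(n) succeeds for clients A and B but fails for C, the iterator is
// reset, lastEntry is set to C and aNoOfBuffers is negated; the walk then
// applies Reserve(-n) to A and B, stopping when it reaches C again, so the
// reservations already made are released before the error is returned.
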
TInt DShPool::SetBufferWindow(DProcess* /*aProcess*/, TInt /*aWindowSize*/)
    {
    return KErrNotSupported;
    }