// Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include "es_commsbuf_internal.h"
#include <comms-infras/commsbufpondop.h>
#include "systemsharedbufs.h"
#include "commsbufasyncreqinternal.h"
#include "commsbufpanic.h"
#include "commsbufpond_internal.h"

CSystemSharedBufPool* CSystemSharedBufPool::New(MCommsBufPondIntf& aPondIntf, const TCommsBufPoolCreateInfo& aCreateInfo)
    {
    CSystemSharedBufPool* self = NULL;
#ifdef SYMBIAN_ZEROCOPY_BUF_FREELIST
    // We take the free list count as 50% of the ceiling size.
    const TInt KFreeListCount = aCreateInfo.iCeiling / 2;
    self = new CSystemSharedBufPool(aPondIntf, aCreateInfo.iBufSize, KFreeListCount);
#else
    self = new CSystemSharedBufPool(aPondIntf, aCreateInfo.iBufSize);
#endif // SYMBIAN_ZEROCOPY_BUF_FREELIST
    if(self)
        {
        if(self->Construct(aCreateInfo) != KErrNone)
            {
            delete self;
            self = NULL;
            }
        }
    return self;
    }

TInt CSystemSharedBufPool::Construct(const TCommsBufPoolCreateInfo& aCreateInfo)
    {
    TShPoolCreateInfo poolCreateInfo(
        TShPoolCreateInfo::ENonPageAlignedBuffer,
        aCreateInfo.iBufSize + ALIGN_UP(sizeof(RCommsBuf)),
        aCreateInfo.iInitialBufs,
        0);
    // The ratios are represented as a 32-bit fixed-point number, where the binary point is defined
    // to be between bits 7 and 8 (where the least-significant bit is defined as bit 0).
    // The format is also known as Q8.
    // See the RShBuf documentation for more details.
    const TReal KQ8Number = 256.0; // Q8 scaling factor, 2^8.
    // Normalise to the Q8 number format.
    TUint growTriggerRatio = (((TReal)aCreateInfo.iMinFreeBufs/aCreateInfo.iInitialBufs) * KQ8Number);
    TUint growByRatio = (((TReal)aCreateInfo.iGrowByBufs/aCreateInfo.iInitialBufs) * KQ8Number);
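    // Worked example with hypothetical values: iInitialBufs = 64 and
    // iMinFreeBufs = 16 give growTriggerRatio = (16.0 / 64) * 256 = 64,
    // the Q8 encoding of 0.25 (grow when only 25% of the buffers are free).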
    // The shrink hysteresis ratio must be > 256 (i.e. > 1.0 in Q8).
    const TUint shrinkHysteresisRatio = 332; // 1.3 * 256 = 332.8, truncated to 332 (Q8).
    poolCreateInfo.SetSizingAttributes(aCreateInfo.iCeiling, growTriggerRatio, growByRatio, shrinkHysteresisRatio);

    TInt err = iPool.Create(poolCreateInfo, EShPoolAllocate | EShPoolWriteable);
#ifdef SYMBIAN_ZEROCOPY_BUF_FREELIST
    if(err == KErrNone)
        {
        err = iFreeListLock.CreateLocal();
        }
#endif // SYMBIAN_ZEROCOPY_BUF_FREELIST
    return err;
    }


CSystemSharedBufPool::~CSystemSharedBufPool()
    {
#ifdef SYMBIAN_ZEROCOPY_BUF_FREELIST
    iFreeListCount = iMaxFreeListCount;
    iFreeList.Free();
    iFreeListLock.Close();
#endif // SYMBIAN_ZEROCOPY_BUF_FREELIST
    iPool.Close();
    }

#ifdef SYMBIAN_ZEROCOPY_BUF_FREELIST

TInt CSystemSharedBufPool::AllocFromFreeList(RCommsBufQ& aBufQ, TInt aSize)
    {
    // Allocates from the free list and returns the total size allocated.
    // Note that the total allocated may not equal the requested size:
    // depending on the pool buffer size it can be a little greater than
    // the requested size.
    TInt transferred = 0;
    TInt bufCount = 0;
    iFreeListLock.Wait();
    transferred = iFreeList.Transfer(aBufQ, aSize, BufSize(), bufCount);
    iFreeListCount -= bufCount;
    iFreeListLock.Signal();
    return transferred;
    }

TBool CSystemSharedBufPool::ReleaseToFreeList(RCommsBuf* aBuf)
    {
    TBool released = EFalse;
    aBuf->SetDataRange(0, aBuf->RawSize()); // Reset the offset to 0 and the length to the raw size.
    iFreeListLock.Wait();
    if(iFreeListCount < iMaxFreeListCount)
        {
        ++iFreeListCount;
        iFreeList.Append(aBuf);
        released = ETrue;
        }
    iFreeListLock.Signal();
    return released;
    }

TInt CSystemSharedBufPool::FreeListCount()
    {
    iFreeListLock.Wait();
    TInt freeCount = iFreeListCount;
    iFreeListLock.Signal();
    return freeCount;
    }
#endif // SYMBIAN_ZEROCOPY_BUF_FREELIST

TInt CSystemSharedBufPool::AllocOverflow(RCommsBufQ& aBufQ, TInt aSize)
    {
    TInt allocated = 0;

#ifdef SYMBIAN_ZEROCOPY_BUF_FREELIST
    allocated = AllocFromFreeList(aBufQ, aSize);
    // See the comment in AllocFromFreeList. RCommsBufQ::Transfer adjusts the
    // end of the commsbuf; we do the same in our algorithm when we allocate
    // from the system shared pool. To avoid a conflicting adjustment, clamp
    // the remaining size to 0 if the free list gave us more than was requested.
    aSize = (allocated > aSize) ? 0 : (aSize - allocated);
#endif

    while(aSize > 0)
        {
        RCommsBuf* buf = Alloc();
        if(buf)
            {
            aBufQ.Append(buf);
            aSize -= buf->RawSize();
            }
        else
            {
            break;
            }
        }
    return aSize; // Return the pending allocation size.
    }

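// AllocUnderflow is the conservative counterpart to AllocOverflow: it prepends
// whole buffers and, apart from the guaranteed first allocation, stops once
// the remainder is smaller than one pool buffer, leaving the rest for a later
// overflow-style allocation.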
TInt CSystemSharedBufPool::AllocUnderflow(RCommsBufQ& aBufQ, TInt aSize)
    {
    TInt allocated = 0;
#ifdef SYMBIAN_ZEROCOPY_BUF_FREELIST
    TInt toAllocate = aSize < BufSize() ? aSize : (aSize - (aSize % BufSize()));
    allocated = AllocFromFreeList(aBufQ, toAllocate);
    // See the comment in AllocFromFreeList. RCommsBufQ::Transfer adjusts the
    // end of the commsbuf; we do the same in our algorithm when we allocate
    // from the system shared pool. To avoid a conflicting adjustment, clamp
    // the remaining size to 0 if the free list gave us more than was requested.
    aSize = (allocated > aSize) ? 0 : (aSize - allocated);
#endif
    // Either no buffers were available in the free list or we allocated only
    // part of the request from it, so check whether we still have to allocate.
    // For a zero-sized alloc with an empty free list we must also check
    // whether we actually allocated anything from the free list.
    if((allocated == 0) || (aSize >= BufSize()))
        {
        do
            {
            RCommsBuf* buf = Alloc();
            if(buf)
                {
                aBufQ.Prepend(buf);
                aSize -= buf->RawSize();
                }
            else
                {
                break;
                }
            } while((aSize - BufSize()) >= 0);
        }
    return aSize;
    }

RCommsBuf* CSystemSharedBufPool::Alloc()
    {
    RShBuf buf;
    if(buf.Alloc(iPool) != KErrNone)
        {
        return NULL;
        }
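    // Buffer layout (as set up in Construct, which reserves
    // ALIGN_UP(sizeof(RCommsBuf)) extra bytes per shared buffer):
    //     [ payload: BufSize() bytes ][ RCommsBuf metadata ]
    // The -BufSize() constructor argument presumably records the offset from
    // the metadata back to the start of the payload.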
    TUint8* metaStart = buf.Ptr() + BufSize();
    return new(metaStart) RCommsBuf(-BufSize(), BufSize(), buf.Handle(), Id());
    }

void CSystemSharedBufPool::Free(RCommsBuf* aBuf)
    {
    TBool released = EFalse;
#ifdef SYMBIAN_ZEROCOPY_BUF_FREELIST
    released = ReleaseToFreeList(aBuf);
#endif
    if(!released)
        {
        RShBuf buf;
        buf.SetHandle(aBuf->Handle());
        buf.Close();
        }
    }

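// Comparator for TLinearOrder: returning -1 when the left-hand pool has the
// larger buffer size sorts the pool array in descending order, so iPools[0]
// is always the pool with the largest buffers.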
TInt CSystemSharedBufPool::Compare(const CSystemSharedBufPool& aLhs, const CSystemSharedBufPool& aRhs)
    {
    if(aLhs.BufSize() == aRhs.BufSize())
        {
        return 0;
        }
    else if(aLhs.BufSize() > aRhs.BufSize())
        {
        return -1;
        }
    else
        {
        return 1;
        }
    }


// -----------------------------------------------------------------------------------------------

T3StageAllocator::T3StageAllocator(RPointerArray<CSystemSharedBufPool>& aPools, TInt aSize, TInt aMinSize, TInt aMaxSize)
    : iPools(aPools),
    iSize(aSize),
    iBiggestPoolIndex(KErrNotFound),
    iSmallestPoolIndex(KErrNotFound),
    iMarkedPoolIndex(KErrNotFound),
    iZeroBufSize(iSize == 0)
    {
    Init(aMinSize, aMaxSize);
    }

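// Overview of the three stages, run over the size-ordered pool array
// (largest buffers first):
//   1. ForwardAlloc1stStage: take whole buffers from the larger pools while
//      the next pool's buffers would be too small for what remains.
//   2. BackwardAlloc2ndStage: walk back towards the larger pools to satisfy
//      any remainder a depleted pool left behind.
//   3. ForwardAlloc3rdStage: from the marked pool onwards, allow a final,
//      possibly oversized, allocation to cover what is left.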
RCommsBuf* T3StageAllocator::Do()
    {
    // If no pool matched the size criteria we have nothing to allocate.
    if(iBiggestPoolIndex == KErrNotFound || iSmallestPoolIndex == KErrNotFound)
        {
        return NULL;
        }

    // Note that our pools are ordered from largest to smallest.
    if(iSize == 0)
        {
        // This is a special case: if the requested size is 0 we allocate the
        // smallest buffer available across the pools.
        ZeroSizedAlloc();
        }
    else
        {
        ForwardAlloc1stStage();
        BackwardAlloc2ndStage();
        // We do the 3rd-stage allocation only if iMarkedPoolIndex was set.
        // That happens at the end of the 1st stage, when further allocation
        // is required in the 2nd, and possibly the 3rd, stage.
        if(iMarkedPoolIndex != KErrNotFound)
            {
            // We already traversed up to iMarkedPoolIndex, so advance the
            // index by 1.
            ++iMarkedPoolIndex;
            ForwardAlloc3rdStage();
            }
        }
    // If the allocation partially failed, free whatever we did allocate.
    if (iSize > 0 && (!iBufQ.IsEmpty()))
        {
        iBufQ.Free();
        }

    // Adjust the end of the last commsbuf if we overshot (negative iSize).
    // This happens when the requested size is less than the actual pool
    // buffer size.
    if(iSize < 0)
        {
        iBufQ.Last()->AdjustDataEnd(iSize);
        }
    return iBufQ.First();
    }

void T3StageAllocator::Init(TInt aMinSize, TInt aMaxSize)
    {
    // Find the range of pools whose buffer size lies within
    // [aMinSize, aMaxSize]; the pools are ordered largest first.
    TInt poolCount = iPools.Count();
    TInt index = 0;

    while(index < poolCount)
        {
        CSystemSharedBufPool* pool = iPools[index];
        if((pool->BufSize() >= aMinSize) && (pool->BufSize() <= aMaxSize))
            {
            if(iBiggestPoolIndex == KErrNotFound)
                {
                iBiggestPoolIndex = index;
                }

            iSmallestPoolIndex = index;
            }
        ++index;
        }
    iMarkedPoolIndex = iSmallestPoolIndex;
    }

void T3StageAllocator::ForwardAlloc1stStage()
    {
    // We are going to do a forward traversal of the array.
    TInt traversalIndex = iBiggestPoolIndex;
    while(iSize > 0 && traversalIndex <= iSmallestPoolIndex)
        {
        CSystemSharedBufPool* currentPool = iPools[traversalIndex];
        TInt nextPoolSize = 0;
        if(traversalIndex + 1 <= iSmallestPoolIndex)
            {
            nextPoolSize = iPools[traversalIndex + 1]->BufSize();
            }

        // If we still need to allocate, check whether the next pool's buffer
        // size is smaller than what we need.
        if(nextPoolSize < iSize)
            {
            // If so, force an allocation from the current pool.
            TInt remains = currentPool->AllocUnderflow(iBufQ, iSize);
            if(remains == iSize)
                {
                iMarkedPoolIndex = traversalIndex;
                break;
                }
            iSize = remains;
            }
        else
            {
            ++traversalIndex;
            }
        }
    }

void T3StageAllocator::BackwardAlloc2ndStage()
    {
    // We are going to do a backward traversal of the array.
    TInt traversalIndex = iMarkedPoolIndex - 1;

    // The backward pass allocates from any pool that has buffers available.
    while((iSize > 0) && iBiggestPoolIndex <= traversalIndex)
        {
        iSize = iPools[traversalIndex]->AllocUnderflow(iBufQ, iSize);
        --traversalIndex;
        }
    }

void T3StageAllocator::ForwardAlloc3rdStage()
    {
    // We are going to do a forward traversal of the array
    // from the marked pool index position.
    TInt traversalIndex = iMarkedPoolIndex;

    while(iSize > 0 && traversalIndex <= iSmallestPoolIndex)
        {
        iSize = iPools[traversalIndex]->AllocOverflow(iBufQ, iSize);
        ++traversalIndex;
        }
    }

void T3StageAllocator::ZeroSizedAlloc()
    {
    // We are going to do a backward traversal of the array.
    TInt traversalIndex = iMarkedPoolIndex;
    // We need only one buffer.
    while(iBufQ.IsEmpty() && iBiggestPoolIndex <= traversalIndex)
        {
        iSize = iPools[traversalIndex]->AllocUnderflow(iBufQ, iSize);
        --traversalIndex;
        }
    }

// -----------------------------------------------------------------------------------------------

MCommsBufPondIntf* CSystemSharedBufPond::New(RArray<TCommsBufPoolCreateInfo>& aPoolInfo)
    {
    CSystemSharedBufPond* self = new CSystemSharedBufPond;
    if(self)
        {
        if(self->Construct(aPoolInfo) != KErrNone)
            {
            delete self;
            self = NULL;
            }
        }
    return self;
    }

TInt CSystemSharedBufPond::Construct(RArray<TCommsBufPoolCreateInfo>& aPoolInfo)
    {
    for (TInt i = 0; i < aPoolInfo.Count(); ++i)
        {
        CSystemSharedBufPool* pool = CSystemSharedBufPool::New(*this, aPoolInfo[i]);
        if(pool == NULL)
            {
            return KErrNoMemory;
            }
        TInt err = iPools.Append(pool);
        if(err != KErrNone)
            {
            delete pool; // Append failed, so the array does not own the pool yet.
            return err;
            }
        }
    // Sort the pools from largest to smallest buffer size.
    TLinearOrder<CSystemSharedBufPool> order(CSystemSharedBufPool::Compare);
    iPools.Sort(order);

    iAsyncAlloc = CSystemSharedAsyncAlloc::New(iPools);
    if(!iAsyncAlloc)
        {
        return KErrNoMemory;
        }
    return KErrNone;
    }

CSystemSharedBufPond::~CSystemSharedBufPond()
    {
    iPools.ResetAndDestroy();
    iPools.Close();
    delete iAsyncAlloc;
    }

// From MCommsBufManagerIntf
RCommsBuf* CSystemSharedBufPond::FromHandle(TInt aHandle)
    {
    RShBuf buf;
    buf.SetHandle(aHandle);
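    // The RCommsBuf metadata was placed at the end of the payload by
    // CSystemSharedBufPool::Alloc(); recover a pointer to it and re-anchor
    // the object in place. Note this computation assumes sizeof(RCommsBuf)
    // is already aligned (compare the ALIGN_UP in
    // CSystemSharedBufPool::Construct).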
    TInt bufSize = buf.Size() - sizeof(RCommsBuf);
    TUint8* metaStart = buf.Ptr() + bufSize;
    return new(metaStart) RCommsBuf();
    }

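// Usage sketch (hypothetical caller): allocate roughly an MTU's worth of
// buffer space with no constraint on the pool buffer sizes, then release the
// resulting chain back to its pools.
//     RCommsBuf* buf = pond.Alloc(1500, 0, KMaxTInt);
//     if(buf)
//         {
//         // ... fill and hand off the buffer chain ...
//         pond.Free(buf);
//         }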
RCommsBuf* CSystemSharedBufPond::Alloc(TInt aSize, TInt aMinBufSize, TInt aMaxBufSize)
    {
    // Check the arguments.
    // - regarding the use of TInt instead of TUint, refer to the comments in CMBufPoolManager::AddL
    __ASSERT_ALWAYS(aSize >= 0, CommsBuf::Panic(EMBuf_SillyAlloc));
    __ASSERT_DEBUG(aMinBufSize >= 0, CommsBuf::Panic(EMBuf_NegativeMinMBufSize));
    __ASSERT_DEBUG(aMaxBufSize >= 0, CommsBuf::Panic(EMBuf_NegativeMaxMBufSize));
    __ASSERT_DEBUG(aMaxBufSize >= aMinBufSize, CommsBuf::Panic(EMBuf_MinExceedsMaxMBufSize));

    T3StageAllocator allocator(iPools, aSize, aMinBufSize, aMaxBufSize);
    return allocator.Do();
    }

TInt CSystemSharedBufPond::BytesAvailable() const
    {
    TInt totalBytesAvbl = 0;
    TInt poolsCount = iPools.Count();
    for (TInt i = 0; i < poolsCount; ++i)
        {
        totalBytesAvbl += (iPools[i]->Pool().FreeCount() * iPools[i]->BufSize());
#ifdef SYMBIAN_ZEROCOPY_BUF_FREELIST
        totalBytesAvbl += (iPools[i]->FreeListCount() * iPools[i]->BufSize());
#endif // SYMBIAN_ZEROCOPY_BUF_FREELIST
        }
    return totalBytesAvbl;
    }

TInt CSystemSharedBufPond::BytesAvailable(TInt aSize) const
    {
    TInt totalBytesAvbl = 0;
    TInt poolsCount = iPools.Count();
    for (TInt i = 0; i < poolsCount; ++i)
        {
        if(iPools[i]->BufSize() == aSize)
            {
            totalBytesAvbl = (iPools[i]->Pool().FreeCount() * iPools[i]->BufSize());
#ifdef SYMBIAN_ZEROCOPY_BUF_FREELIST
            totalBytesAvbl += (iPools[i]->FreeListCount() * iPools[i]->BufSize());
#endif // SYMBIAN_ZEROCOPY_BUF_FREELIST
            break;
            }
        }
    return totalBytesAvbl;
    }

TInt CSystemSharedBufPond::NextBufSize(TInt aSize) const
    {
    // The pools are sorted largest first, so walk backwards to find the
    // smallest buffer size that is still greater than aSize.
    TInt poolsCount = iPools.Count() - 1;
    for (TInt i = poolsCount; i >= 0; --i)
        {
        if (iPools[i]->BufSize() > aSize)
            {
            return iPools[i]->BufSize();
            }
        }
    return KErrNotFound;
    }

TInt CSystemSharedBufPond::LargestBufSize() const
    {
    return iPools[0]->BufSize();
    }

void CSystemSharedBufPond::StartRequest(CCommsBufAsyncRequest& aRequest)
    {
    iAsyncAlloc->StartRequest(aRequest);
    }

void CSystemSharedBufPond::CancelRequest(CCommsBufAsyncRequest& aRequest)
    {
    iAsyncAlloc->CancelRequest(aRequest);
    }

void CSystemSharedBufPond::Free(RCommsBuf* aBuf)
    {
    // Walk the chain, returning each buffer to the pool that owns it.
    while(aBuf != NULL)
        {
        RCommsBuf* nextBuf = aBuf->Next();
        aBuf->SetNext(NULL);
        CSystemSharedBufPool* pool = static_cast<CSystemSharedBufPool*>(aBuf->Pool());
        pool->Free(aBuf);
        aBuf = nextBuf;
        }
    }

void CSystemSharedBufPond::SetContext()
    {
    }

void CSystemSharedBufPond::Release(RLibrary& /*aLib*/)
    {
    delete this;
    }

MCommsBufPondDbg& CSystemSharedBufPond::CommsBufPondDbg()
    {
    return *this;
    }

RCommsBuf* CSystemSharedBufPond::__DbgBufChain()
    {
    return NULL;
    }

RCommsBuf* CSystemSharedBufPond::__DbgBufChain(TUint /* aBufSize */)
    {
    return NULL;
    }

void CSystemSharedBufPond::__DbgSetPoolLimit(TInt /* aCount */)
    {
    }

void CSystemSharedBufPond::__DbgSetPoolLimit(TInt /* aCount */, TUint /* aBufSize */)
    {
    }

void CSystemSharedBufPond::__DbgSetFailAfter(TInt /* aCount */)
    {
    }

TUint CSystemSharedBufPond::__DbgGetBufSpace()
    {
    return 0;
    }

TUint CSystemSharedBufPond::__DbgGetBufSpace(TUint /* aBufSize */)
    {
    return 0;
    }

TUint CSystemSharedBufPond::__DbgGetBufTotal()
    {
    return 0;
    }

TUint CSystemSharedBufPond::__DbgGetBufTotal(TUint /* aBufSize */)
    {
    return 0;
    }

TInt CSystemSharedBufPond::__DbgGetHeapSize()
    {
    return 0;
    }


/**
@purpose Writes the flattened pond structure to a descriptor for transfer to a commsbuf-aware driver
@param aStore Descriptor into which the pond structure is to be flattened
@return KErrNone on success; KErrArgument if aStore is too small to hold a TCommsPond
*/
TInt CSystemSharedBufPond::Store(TDes8& aStore) const
    {
    // todo_cdg This needs to be fixed: it assumes the TDes8 payload is word
    // aligned, which need not be the case at all.
    // Need enough space to store the max number of pools.
    if(aStore.Length() < sizeof(TCommsPond))
        {
#ifdef _DEBUG
        CommsBuf::Panic(EMBuf_InsufficientSpaceToStorePond);
#endif
        return KErrArgument;
        }

    // Map the basic pond structure on top of the flat buffer we are writing to.
    TCommsPond* pond = const_cast<TCommsPond*>(reinterpret_cast<const TCommsPond*>(aStore.Ptr()));

    TInt numPools = iPools.Count();
    for(TInt i = 0; i < numPools; i++)
        {
        CSystemSharedBufPool* pool = iPools[i];
        RShPool shPool = pool->Pool();
        pond->iPoolRecords[i] = TPoolRecord(shPool.Handle(), (TInt)pool, pool->BufSize());
        }
    pond->iNumPools = numPools;
    return KErrNone;
    }