|
1 // Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: Implementation of system shared buffer allocation and buffer |
|
14 // management. |
|
15 // |
|
16 |
|
17 #ifndef __SYSTEMSHAREDBUFS_H__ |
|
18 #define __SYSTEMSHAREDBUFS_H__ |
|
19 |
|
20 #include "commsbufpondintf.h" |
|
21 #include "commsbufponddbg.h" |
|
22 #include "commsbufpool.h" |
|
23 #include "systemsharedasyncalloc.h" |
|
24 #include "commsbufq.h" |
|
25 #include "commsbufponddbg.h" |
|
26 #include <e32shbuf.h> |
|
27 |
|
28 class TCommsBufPoolCreateInfo; |
|
29 class RCommsBuf; |
|
30 class RShPool; |
|
31 class MCommsBufPondIntf; |
|
32 /** |
|
33 |
|
34 Representation of a single system shared pool.Allocates/free from the system shared pool or from |
|
35 the freelist if enabled) |
|
36 |
|
37 @internalTechnology |
|
38 |
|
39 */ |
|
40 NONSHARABLE_CLASS(CSystemSharedBufPool) : public CCommsBufPool |
|
41 { |
|
42 public: |
|
43 static CSystemSharedBufPool* New(MCommsBufPondIntf& aPondIntf, const TCommsBufPoolCreateInfo& aCreateInfo); |
|
44 |
|
45 #ifdef SYMBIAN_ZEROCOPY_BUF_FREELIST |
|
46 TInt FreeListCount(); |
|
47 #endif // SYMBIAN_ZEROCOPY_BUF_FREELIST |
|
48 |
|
49 ~CSystemSharedBufPool(); |
|
50 |
|
51 TInt AllocUnderflow(RCommsBufQ& aBufQ, TInt aSize); |
|
52 TInt AllocOverflow(RCommsBufQ& aBufQ, TInt aSize); |
|
53 |
|
54 RCommsBuf* Alloc(); |
|
55 void Free(RCommsBuf* aBuf); |
|
56 inline RShPool Pool() const; |
|
57 |
|
58 static TInt Compare(const CSystemSharedBufPool& aLhs, const CSystemSharedBufPool& aRhs); |
|
59 |
|
60 private: |
|
61 inline CSystemSharedBufPool(MCommsBufPondIntf& aPondIntf, TInt aSize); |
|
62 TInt Construct(const TCommsBufPoolCreateInfo& aCreateInfo); |
|
63 |
|
64 #ifdef SYMBIAN_ZEROCOPY_BUF_FREELIST |
|
65 inline CSystemSharedBufPool(MCommsBufPondIntf& aPondIntf, TInt aSize, TInt aFreeListCount); |
|
66 TInt AllocFromFreeList(RCommsBufQ& BufQ, TInt aSize); |
|
67 TBool ReleaseToFreeList(RCommsBuf* aBuf); |
|
68 #endif // SYMBIAN_ZEROCOPY_BUF_FREELIST |
|
69 |
|
70 private: |
|
71 RShPool iPool; |
|
72 #ifdef SYMBIAN_ZEROCOPY_BUF_FREELIST |
|
73 RWorkerLock iFreeListLock; |
|
74 RCommsBufQ iFreeList; |
|
75 TInt iMaxFreeListCount; |
|
76 TInt iFreeListCount; |
|
77 #endif // SYMBIAN_ZEROCOPY_BUF_FREELIST |
|
78 }; |
|
79 |
|
80 /** |
|
81 Implements the 3 stage buffer allocation algorithm using the system wide shared buffer APIs or |
|
82 from the freelist (if enabled) |
|
83 |
|
84 Description of the 3 stage allocator. |
|
85 |
|
86 Requirements: |
|
87 1/ The algorithm MUST try to allocate from the pool that has bigger or equal size buffers in |
|
88 preference to the pools that has smaller size buffers ie; |
|
89 2/ The algorithm MUST return fewest bytes of memory that satisfy the request and doesn't conflict |
|
90 with 1/ |
|
91 3/ More than one buffer may be allocated and linked, if the requested buffer size doesn't directly |
|
92 satisfy with the configured buffer sizes. |
|
93 |
|
94 Input parameters: |
|
95 Size - Requested total size |
|
Minimum Size - The size that the allocated buffer(s) must at least have
|
97 Maximum Size - The size that the allocated buffer(s) must not exceed |
|
98 |
|
99 |
|
100 Working Model: |
|
101 The algorithm relies on the big to small order. If the order is not obeyed/adhered the algorithm will |
|
102 fail to allocate the buffers from the right pools and more buffers maybe allocated than needed.The |
|
103 pools are sorted during initialization time. |
|
104 |
|
105 The first step in the algorithm is to clip the pool array position based on the given min and max size. |
|
106 Clipping is done to ensure that allocation happens from the pools that satisfies the given min and |
|
107 max condition, and no further checking is needed on the pools to see whether the pool buffer size |
|
108 satisfies the min and max values. Once the clipping is done the algorithm works in 4 stages. |
|
109 |
|
110 The following terms are used in the description of the algorithm. The terms should be read as: |
|
111 biggest pool --- The pool that has the bigger buffer size, relative to the given max value |
|
112 smallest pool --- The pool that has the smallest buffer size, relative to the given min value |
|
113 |
|
The reader should note that there is no guarantee that the requested allocation size will exactly or
nearly match the pool sizes, and this applies to each stage that operates on the allocation size.
|
116 |
|
The first stage's execution depends on the input parameters and the lower and upper index. Subsequent
stages' execution depends on the outcome of the prior stage.
|
119 |
|
120 The 4 stages are: |
|
121 1/ Traverse the pool array forward from the biggest pool index till the traversal reaches the smallest |
|
122 pool index. When the requested size is greater than the buffer size of the "next" pool allocate as much |
|
123 as possible from the "current" pool and decrement the requested size if there is an allocation. If there |
|
124 is no allocation happens from the "current" pool mark the "current" pool index and exit the stage. |
|
125 At this point, |
|
126 a/ We allocated completely as requested or |
|
127 b/ We would have been passed through and didn't allocate from the bigger pools that may have buffer for |
|
128 allocation in order to satisfy the Credo 2/. But we MUST satisfy the Credo 1/ and in order to satisfy the |
|
129 Credo 1/ we need to traverse back from the point where we stopped the traversal, hence need to mark the |
|
130 "current" pool index. |
|
131 2/ If the first stage allocation cannot complete due to the non-availability of buffer in the pools or with |
|
132 1.b/ traverse the array backward starting from the marked pool index till the traversal reaches the biggest |
|
133 pool index. Allocate from any pool where buffers are available by satisfying Credo 1/. |
|
134 3/ If the second stage allocation cannot complete due to the non-availability of buffers then we have to |
|
135 increment the marked pool index by 1 as the marked pool has been checked for buffer availability on the 2nd |
|
136 stage. We have the following scenario: |
|
137 a/ The pending allocation size (maybe partial or the size "actually" requested)is greater than the marked |
|
138 pool index buffer size |
|
139 b/ Allocation MUST satisfy the Credo 1/ |
|
140 |
|
141 Traverse the array forward from the marked pool index till the traversal reaches the smallest pool index. |
|
142 Allocate from any pool where buffers are available. |
|
143 |
|
144 Special condition: |
|
145 If the requested size is 0 we will allocate from the pool that has the smallest buffer size, provided the |
|
146 availability of buffers in the pool. The zero sized allocation takes a different path (using ZeroSizedAlloc Fn.) |
|
147 and does not run through the stages as described above. |
|
148 |
|
149 Reasoning: |
|
150 |
|
We MUST satisfy requirement 1 and we SHOULD try to achieve requirement 2; we then encounter the
following during allocation:

1/ No guarantee that we get buffers from the pool that has the best size for the allocation.
2/ We might skip a pool while searching for a better size and then be unable to allocate from that pool
due to the non-availability of buffers. We would then need to traverse back over the pool array.
3/ No guarantee that we can allocate when we traverse back, because other threads may have executed in
between and allocated the buffers, so the pool might have become empty — and vice versa.
|
159 |
|
160 Drawbacks: |
|
161 |
|
1/ More looping happens with the algorithm in certain cases when the bigger-size pools become empty.
|
163 |
|
164 Conclusion: |
|
165 1/ Couple of more loops on certain cases does not add much problems as the total no. of elements in the pools |
|
166 are expected to be ~10 hence does not cause much problems in terms of performance. |
|
167 2/ All depends on how the pool buffer sizes are configured. It is expected that the smaller size buffer numbers |
|
168 in the pool will be lesser than bigger sizes. So most time we get good allocation. |
|
3/ More problems arise in the allocation of system shared buffers from the Kernel, as the algorithm has
to allocate one by one and ask the pool each time. If there are no buffers available in the requested
pool there will be more system calls.
|
172 |
|
173 Based on the above arguments the conclusion is that algorithm (and the couple of more loops) doesn't cause much |
|
174 problems |
|
175 |
|
176 |
|
177 Future & suggestions: |
|
It may be possible to fine-tune the algorithm. But any future enhancements should consider
|
179 1/ the min and max conditions |
|
180 2/ Non-deterministic configured pool sizes |
|
181 |
|
One of the things that would definitely improve the algorithm is to request the kernel to allocate a
series of RShBuf's. This would reduce the no. of system calls currently happening on the system. The
current freelist implementation tries to address this issue to an extent.
|
184 implementation tries to address this issue to an extent. |
|
185 |
|
186 @internalTechnology |
|
187 */ |
|
188 |
|
class T3StageAllocator
	{
public:
	// aPools MUST be sorted big -> small buffer size (see the algorithm
	// description above; the pools are sorted during initialisation).
	// aSize is the requested total size; aMinSize/aMaxSize bound the buffer
	// sizes the allocation may use.
	T3StageAllocator(RPointerArray<CSystemSharedBufPool>& aPools, TInt aSize, TInt aMinSize, TInt aMaxSize);
	// Runs the allocation and returns the resulting buffer chain
	// (presumably NULL when nothing could be allocated — confirm in the
	// implementation).
	RCommsBuf* Do();

private:

	// Clips iBiggestPoolIndex/iSmallestPoolIndex against the given min and
	// max sizes so later stages need no per-pool min/max checks.
	void Init(TInt aMinSize, TInt aMaxSize);

	// The three stages described in the class comment: forward pass from the
	// biggest eligible pool, backward pass from the marked index, then a
	// final forward pass from the marked index.
	void ForwardAlloc1stStage();
	void BackwardAlloc2ndStage();
	void ForwardAlloc3rdStage();

	// Special-case path: a zero-sized request is served from the smallest
	// pool and does not run through the three stages.
	void ZeroSizedAlloc();

private:
	RPointerArray<CSystemSharedBufPool>& iPools; // Sorted pools (big -> small)
	TInt iSize;              // Remaining size still to be allocated
	TInt iBiggestPoolIndex;  // Clipped index of the biggest eligible pool
	TInt iSmallestPoolIndex; // Clipped index of the smallest eligible pool
	TInt iMarkedPoolIndex;   // Where stage 1 stopped; starting point of stage 2
	RCommsBufQ iBufQ;        // Accumulates the allocated buffers
	TBool iZeroBufSize;      // ETrue when the requested size was 0
	};
|
214 |
|
215 /** |
|
216 Implements the buffer management using the system wide shared buffers |
|
217 |
|
218 @internalTechnology |
|
219 */ |
|
220 NONSHARABLE_CLASS(CSystemSharedBufPond) : public CBase, public MCommsBufPondIntf, public MCommsBufPondDbg |
|
221 { |
|
222 friend class RCommsBufPond; |
|
223 |
|
224 public: |
|
225 static MCommsBufPondIntf* New(RArray <TCommsBufPoolCreateInfo>& aPoolInfo); |
|
226 ~CSystemSharedBufPond(); |
|
227 |
|
228 private: |
|
229 |
|
230 TInt Construct(RArray <TCommsBufPoolCreateInfo>& aPoolInfo); |
|
231 |
|
232 // From MCommsBufPondIntf |
|
233 virtual RCommsBuf* Alloc(TInt aSize, TInt aMinBufSize, TInt aMaxBufSize); |
|
234 virtual RCommsBuf* FromHandle(TInt aHandle); |
|
235 virtual TInt Store(TDes8& aStore) const; |
|
236 virtual void Free(RCommsBuf* aBuf); |
|
237 virtual TInt BytesAvailable() const; |
|
238 virtual TInt BytesAvailable(TInt aSize) const; |
|
239 virtual TInt NextBufSize(TInt aSize) const; |
|
240 virtual TInt LargestBufSize() const; |
|
241 virtual void StartRequest(CCommsBufAsyncRequest& aRequest); |
|
242 virtual void CancelRequest(CCommsBufAsyncRequest& aRequest); |
|
243 virtual void SetContext(); |
|
244 virtual void Release(RLibrary& aLib); |
|
245 virtual MCommsBufPondDbg& CommsBufPondDbg(); |
|
246 |
|
247 // From MCommsBufPondDbg |
|
248 virtual RCommsBuf* __DbgBufChain(); |
|
249 virtual RCommsBuf* __DbgBufChain(TUint aBufSize); |
|
250 virtual void __DbgSetPoolLimit(TInt aCount); |
|
251 virtual void __DbgSetPoolLimit(TInt aCount, TUint aBufSize); |
|
252 virtual void __DbgSetFailAfter(TInt aCount=0); |
|
253 virtual TUint __DbgGetBufSpace(); |
|
254 virtual TUint __DbgGetBufSpace(TUint aBufSize); |
|
255 virtual TUint __DbgGetBufTotal(); |
|
256 virtual TUint __DbgGetBufTotal(TUint aMufSize); |
|
257 virtual TInt __DbgGetHeapSize(); |
|
258 |
|
259 private: |
|
260 RPointerArray<CSystemSharedBufPool> iPools; |
|
261 CSystemSharedAsyncAlloc* iAsyncAlloc; |
|
262 }; |
|
263 |
|
264 #include "systemsharedbufs.inl" |
|
265 #endif // __SYSTEMSHAREDBUFS_H__ |
|
266 |
|
267 |