// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// kernel\eka\common\heap_hybrid.cpp
//
// Uses malloc (aka dlmalloc) written by Doug Lea version 2.8.4
//

#include "common.h"
#ifdef __KERNEL_MODE__
#include <kernel/kern_priv.h>
#endif
#include "dla.h"
#ifndef __KERNEL_MODE__
#include "slab.h"
#include "page_alloc.h"
#endif
#include "heap_hybrid.h"

// Enables the BTrace code to be compiled in
#define ENABLE_BTRACE

// If non-zero, this causes the iSlabs to be configured only when the chunk size exceeds this level
#define DELAYED_SLAB_THRESHOLD (64*1024)    // 64KB seems about right based on trace data
#define SLAB_CONFIG 0xabe                   // Use slabs of size 48, 40, 32, 24, 20, 16, 12, and 8 bytes
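// Illustrative note (not in the original source): one plausible decoding of the
// SLAB_CONFIG mask, assuming bit n enables a slab cell size of 4*(n+1) bytes.
// 0xabe == 0b101010111110 has bits 1,2,3,4,5,7,9,11 set, which under that
// assumption yields exactly the eight sizes listed above:
//     8, 12, 16, 20, 24, 32, 40 and 48 bytes.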

#ifdef _DEBUG
#define __SIMULATE_ALLOC_FAIL(s) if (CheckForSimulatedAllocFail()) {s}
#define __ALLOC_DEBUG_HEADER(s) (s += EDebugHdrSize)
#define __SET_DEBUG_DATA(p,n,c) (((SDebugCell*)(p))->nestingLevel = (n), ((SDebugCell*)(p))->allocCount = (c))
#define __GET_USER_DATA_BFR(p) ((p!=0) ? (TUint8*)(p) + EDebugHdrSize : NULL)
#define __GET_DEBUG_DATA_BFR(p) ((p!=0) ? (TUint8*)(p) - EDebugHdrSize : NULL)
#define __ZAP_CELL(p) memset( (TUint8*)p, 0xde, (AllocLen(__GET_USER_DATA_BFR(p))+EDebugHdrSize))
#define __DEBUG_SAVE(p) TInt dbgNestLevel = ((SDebugCell*)p)->nestingLevel
#define __DEBUG_RESTORE(p) if (p) {((SDebugCell*)p)->nestingLevel = dbgNestLevel;}
#define __DEBUG_HDR_SIZE EDebugHdrSize
#define __REMOVE_DBG_HDR(n) (n*EDebugHdrSize)
#define __GET_AVAIL_BLOCK_SIZE(s) ( (s<EDebugHdrSize) ? 0 : s-EDebugHdrSize )
#define __UPDATE_ALLOC_COUNT(o,n,c) if (o!=n && n) {((SDebugCell*)n)->allocCount = (c);}
#define __INIT_COUNTERS(i) iCellCount=i,iTotalAllocSize=i
#define __INCREMENT_COUNTERS(p) iCellCount++, iTotalAllocSize += AllocLen(p)
#define __DECREMENT_COUNTERS(p) iCellCount--, iTotalAllocSize -= AllocLen(p)
#define __UPDATE_TOTAL_ALLOC(p,s) iTotalAllocSize += (AllocLen(__GET_USER_DATA_BFR(p)) - s)

#else
#define __SIMULATE_ALLOC_FAIL(s)
#define __ALLOC_DEBUG_HEADER(s)
#define __SET_DEBUG_DATA(p,n,c)
#define __GET_USER_DATA_BFR(p) (p)
#define __GET_DEBUG_DATA_BFR(p) (p)
#define __ZAP_CELL(p)
#define __DEBUG_SAVE(p)
#define __DEBUG_RESTORE(p)
#define __DEBUG_HDR_SIZE 0
#define __REMOVE_DBG_HDR(n) 0
#define __GET_AVAIL_BLOCK_SIZE(s) (s)
#define __UPDATE_ALLOC_COUNT(o,n,c)
#define __INIT_COUNTERS(i) iCellCount=i,iTotalAllocSize=i
#define __INCREMENT_COUNTERS(p)
#define __DECREMENT_COUNTERS(p)
#define __UPDATE_TOTAL_ALLOC(p,s)

#endif


#define MEMORY_MONITORED (iFlags & EMonitorMemory)
#define GM (&iGlobalMallocState)
#define IS_FIXED_HEAP (iFlags & EFixedSize)
#define __INIT_COUNTERS(i) iCellCount=i,iTotalAllocSize=i
#define __POWER_OF_2(x) (!((x)&((x)-1)))

#define __DL_BFR_CHECK(M,P) \
    if ( MEMORY_MONITORED ) \
        if ( !IS_ALIGNED(P) || ((TUint8*)(P)<M->iSeg.iBase) || ((TUint8*)(P)>(M->iSeg.iBase+M->iSeg.iSize))) \
            BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)P, (TUint32)0), HEAP_PANIC(ETHeapBadCellAddress); \
        else DoCheckInuseChunk(M, MEM2CHUNK(P))

#ifndef __KERNEL_MODE__

#define __SLAB_BFR_CHECK(S,P,B) \
    if ( MEMORY_MONITORED ) \
        if ( ((TUint32)P & 0x3) || ((TUint8*)P<iMemBase) || ((TUint8*)(P)>(TUint8*)this)) \
            BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)P, (TUint32)S), HEAP_PANIC(ETHeapBadCellAddress); \
        else DoCheckSlab(S, EPartialFullSlab, P), BuildPartialSlabBitmap(B,S,P)
#define __PAGE_BFR_CHECK(P) \
    if ( MEMORY_MONITORED ) \
        if ( ((TUint32)P & ((1 << iPageSize)-1)) || ((TUint8*)P<iMemBase) || ((TUint8*)(P)>(TUint8*)this)) \
            BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)P, (TUint32)0), HEAP_PANIC(ETHeapBadCellAddress)

#endif

#ifdef _MSC_VER
// This is required while we are still using VC6 to compile, so as to avoid warnings that cannot be fixed
// without having to edit the original Doug Lea source. The 4146 warnings are due to the original code having
// a liking for negating unsigned numbers and the 4127 warnings are due to the original code using the RTCHECK
// macro with values that are always defined as 1. It is better to turn these warnings off than to introduce
// diffs between the original Doug Lea implementation and our adaptation of it
#pragma warning( disable : 4146 ) /* unary minus operator applied to unsigned type, result still unsigned */
#pragma warning( disable : 4127 ) /* conditional expression is constant */
#endif // _MSC_VER


/**
@SYMPatchable
@publishedPartner
@released

Defines the minimum cell size of a heap.

The constant can be changed at ROM build time using patchdata OBY keyword.

@deprecated Patching this constant no longer has any effect.
*/
#ifdef __X86GCC__   // For X86GCC we don't use the proper data import attribute
#undef IMPORT_D     // since the constants are not really imported. GCC doesn't
#define IMPORT_D    // allow imports from self.
#endif
IMPORT_D extern const TInt KHeapMinCellSize;

/**
@SYMPatchable
@publishedPartner
@released

This constant defines the ratio that determines the amount of hysteresis between heap growing and heap
shrinking.
It is a 32-bit fixed point number where the radix point is defined to be
between bits 7 and 8 (where the LSB is bit 0) i.e. using standard notation, a Q8 or a fx24.8
fixed point number. For example, for a ratio of 2.0, set KHeapShrinkHysRatio=0x200.

The heap shrinking hysteresis value is calculated to be:
@code
KHeapShrinkHysRatio*(iGrowBy>>8)
@endcode
where iGrowBy is a page aligned value set by the argument, aGrowBy, to the RHeap constructor.
The default hysteresis value is iGrowBy bytes i.e. KHeapShrinkHysRatio=2.0.

Memory usage may be improved by reducing the heap shrinking hysteresis
by setting 1.0 < KHeapShrinkHysRatio < 2.0. Heap shrinking hysteresis is disabled/removed
when KHeapShrinkHysRatio <= 1.0.

The constant can be changed at ROM build time using patchdata OBY keyword.
*/
IMPORT_D extern const TInt KHeapShrinkHysRatio;
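// Worked example (illustrative, not part of the original source): with
// KHeapShrinkHysRatio = 0x300 (3.0 in Q8 fixed point) and iGrowBy = 0x1000
// (4KB), the trim threshold computed in InitTop() below is
//     0x300 * (0x1000 >> 8) == 0x300 * 0x10 == 0x3000,
// i.e. the heap is only shrunk once 12KB of free space accumulates at the top.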

UEXPORT_C TInt RHeap::AllocLen(const TAny* aCell) const
{
    const MAllocator* m = this;
    return m->AllocLen(aCell);
}

UEXPORT_C TAny* RHeap::Alloc(TInt aSize)
{
    const MAllocator* m = this;
    return ((MAllocator*)m)->Alloc(aSize);
}

UEXPORT_C void RHeap::Free(TAny* aCell)
{
    const MAllocator* m = this;
    ((MAllocator*)m)->Free(aCell);
}

UEXPORT_C TAny* RHeap::ReAlloc(TAny* aCell, TInt aSize, TInt aMode)
{
    const MAllocator* m = this;
    return ((MAllocator*)m)->ReAlloc(aCell, aSize, aMode);
}

UEXPORT_C TInt RHeap::DebugFunction(TInt aFunc, TAny* a1, TAny* a2)
{
    const MAllocator* m = this;
    return ((MAllocator*)m)->DebugFunction(aFunc, a1, a2);
}

UEXPORT_C TInt RHeap::Extension_(TUint aExtensionId, TAny*& a0, TAny* a1)
{
    const MAllocator* m = this;
    return ((MAllocator*)m)->Extension_(aExtensionId, a0, a1);
}
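// Usage sketch (illustrative, not part of the original source): client code
// talks to the RHeap facade, and each wrapper above simply forwards through
// the MAllocator interface to the concrete allocator, e.g.
//
//     RHeap* heap = ...;           // some heap handed to the client
//     TAny* p = heap->Alloc(64);   // dispatches into RHybridHeap::Alloc
//     heap->Free(p);               // dispatches into RHybridHeap::Free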

#ifndef __KERNEL_MODE__

EXPORT_C TInt RHeap::AllocSize(TInt& aTotalAllocSize) const
{
    const MAllocator* m = this;
    return m->AllocSize(aTotalAllocSize);
}

EXPORT_C TInt RHeap::Available(TInt& aBiggestBlock) const
{
    const MAllocator* m = this;
    return m->Available(aBiggestBlock);
}

EXPORT_C void RHeap::Reset()
{
    const MAllocator* m = this;
    ((MAllocator*)m)->Reset();
}

EXPORT_C TInt RHeap::Compress()
{
    const MAllocator* m = this;
    return ((MAllocator*)m)->Compress();
}
#endif

RHybridHeap::RHybridHeap()
{
    // This initialisation cannot be done in RHeap() for compatibility reasons
    iMaxLength = iChunkHandle = iNestingLevel = 0;
    iTop = NULL;
    iFailType = ENone;
    iTestData = NULL;
}

void RHybridHeap::operator delete(TAny*, TAny*)
/**
Called if the constructor invoked by operator new(TUint aSize, TAny* aBase) throws an exception.
This is a dummy, as the corresponding operator new does not allocate memory.
*/
{}


#ifndef __KERNEL_MODE__
void RHybridHeap::Lock() const
/**
@internalComponent
*/
{((RFastLock&)iLock).Wait();}


void RHybridHeap::Unlock() const
/**
@internalComponent
*/
{((RFastLock&)iLock).Signal();}


TInt RHybridHeap::ChunkHandle() const
/**
@internalComponent
*/
{
    return iChunkHandle;
}

#else
//
// This method is implemented in kheap.cpp
//
//void RHybridHeap::Lock() const
/**
@internalComponent
*/
//  {;}



//
// This method is implemented in kheap.cpp
//
//void RHybridHeap::Unlock() const
/**
@internalComponent
*/
//  {;}


TInt RHybridHeap::ChunkHandle() const
/**
@internalComponent
*/
{
    return 0;
}
#endif

RHybridHeap::RHybridHeap(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread, TBool aDLOnly, TBool aUseAdjust)
/**
Constructor for a non-fixed heap. Unlike the fixed heap, this heap is quite flexible in terms of its minimum and
maximum lengths and in that it can use the hybrid allocator if all of its requirements are met.
*/
    : iOffset(aOffset), iChunkSize(aMinLength)
{
    __ASSERT_ALWAYS(iOffset>=0, HEAP_PANIC(ETHeapNewBadOffset));

    iChunkHandle = aChunkHandle;
    iMinLength = aMinLength;
    iMaxLength = aMaxLength;

    // If the user has explicitly specified 0 as the aGrowBy value, set it to 1 so that it will be rounded up to the nearest page size
    if (aGrowBy == 0)
        aGrowBy = 1;
    GET_PAGE_SIZE(iPageSize);
    iGrowBy = _ALIGN_UP(aGrowBy, iPageSize);

    Construct(aSingleThread, aDLOnly, aUseAdjust, aAlign);
}

RHybridHeap::RHybridHeap(TInt aMaxLength, TInt aAlign, TBool aSingleThread)
/**
Constructor for a fixed heap. We have restrictions in that we have fixed minimum and maximum lengths and cannot grow,
and we only use the DL allocator.
*/
    : iOffset(0), iChunkSize(aMaxLength)
{
    iChunkHandle = NULL;
    iMinLength = aMaxLength;
    iMaxLength = aMaxLength;
    iGrowBy = 0;

    Construct(aSingleThread, ETrue, ETrue, aAlign);
}

TAny* RHybridHeap::operator new(TUint aSize, TAny* aBase) __NO_THROW
{
    __ASSERT_ALWAYS(aSize>=sizeof(RHybridHeap), HEAP_PANIC(ETHeapNewBadSize));
    RHybridHeap* h = (RHybridHeap*)aBase;
    h->iBase = ((TUint8*)aBase) + aSize;
    return aBase;
}
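// Usage sketch (illustrative assumption, not from the original source): the
// heap object is constructed in place at the start of its own chunk, e.g.
//
//     RHybridHeap* h = new(chunkBase) RHybridHeap(maxLength, align, ETrue);
//
// so that iBase ends up pointing just past the heap's own bookkeeping data.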

void RHybridHeap::Construct(TBool aSingleThread, TBool aDLOnly, TBool aUseAdjust, TInt aAlign)
{
    iAlign = aAlign ? aAlign : RHybridHeap::ECellAlignment;
    __ASSERT_ALWAYS((TUint32)iAlign>=sizeof(TAny*) && __POWER_OF_2(iAlign), HEAP_PANIC(ETHeapNewBadAlignment));

    // This initialisation cannot be done in RHeap() for compatibility reasons
    iTop = NULL;
    iFailType = ENone;
    iNestingLevel = 0;
    iTestData = NULL;

    iHighWaterMark = iMinLength;
    iAllocCount = 0;
    iFlags = aSingleThread ? ESingleThreaded : 0;
    iGrowBy = _ALIGN_UP(iGrowBy, iPageSize);

    if ( iMinLength == iMaxLength )
    {
        iFlags |= EFixedSize;
        aDLOnly = ETrue;
    }
#ifndef __KERNEL_MODE__
#ifdef DELAYED_SLAB_THRESHOLD
    iSlabInitThreshold = DELAYED_SLAB_THRESHOLD;
#else
    iSlabInitThreshold = 0;
#endif // DELAYED_SLAB_THRESHOLD
    iUseAdjust = aUseAdjust;
    iDLOnly = aDLOnly;
#else
    (void)aUseAdjust;
#endif
    // Initialise suballocators
    // if DL only is required then it cannot allocate slab or page memory
    // so these sub-allocators should be disabled. Otherwise initialise with default values
    if ( aDLOnly )
    {
        Init(0, 0);
    }
    else
    {
        Init(SLAB_CONFIG, 16);
    }

#ifdef ENABLE_BTRACE

    TUint32 traceData[4];
    traceData[0] = iMinLength;
    traceData[1] = iMaxLength;
    traceData[2] = iGrowBy;
    traceData[3] = iAlign;
    BTraceContextN(BTrace::ETest1, 90, (TUint32)this, 11, traceData, sizeof(traceData));
#endif

}
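// Illustrative note (assumption, not stated explicitly in the original source):
// the second argument to Init() is a power of two, so Init(SLAB_CONFIG, 16)
// routes allocations of 2^16 == 64KB and larger to the paged allocator; this
// matches both the size table comment inside Init() and the
// (aSize >> iPageThreshold) test in Alloc() below.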

#ifndef __KERNEL_MODE__
TInt RHybridHeap::ConstructLock(TUint32 aMode)
{
    TBool duplicateLock = EFalse;
    TInt r = KErrNone;
    if (!(iFlags & ESingleThreaded))
    {
        duplicateLock = aMode & UserHeap::EChunkHeapSwitchTo;
        r = iLock.CreateLocal(duplicateLock ? EOwnerThread : EOwnerProcess);
        if( r != KErrNone)
        {
            iChunkHandle = 0;
            return r;
        }
    }

    if ( aMode & UserHeap::EChunkHeapSwitchTo )
        User::SwitchHeap(this);

    iHandles = &iChunkHandle;
    if (!(iFlags & ESingleThreaded))
    {
        // now change the thread-relative chunk/semaphore handles into process-relative handles
        iHandleCount = 2;
        if(duplicateLock)
        {
            RHandleBase s = iLock;
            r = iLock.Duplicate(RThread());
            s.Close();
        }
        if (r==KErrNone && (aMode & UserHeap::EChunkHeapDuplicate))
        {
            r = ((RChunk*)&iChunkHandle)->Duplicate(RThread());
            if (r!=KErrNone)
                iLock.Close(), iChunkHandle=0;
        }
    }
    else
    {
        iHandleCount = 1;
        if (aMode & UserHeap::EChunkHeapDuplicate)
            r = ((RChunk*)&iChunkHandle)->Duplicate(RThread(), EOwnerThread);
    }

    return r;
}
#endif

void RHybridHeap::Init(TInt aBitmapSlab, TInt aPagePower)
{
    /* Moved code which does initialisation */
    iTop = (TUint8*)this + iMinLength;
    iBase = Ceiling(iBase, ECellAlignment); // Align iBase address

    __INIT_COUNTERS(0);
    // memset(&mparams,0,sizeof(mparams));

    InitDlMalloc(iTop - iBase, 0);

#ifndef __KERNEL_MODE__
    SlabInit();
    iSlabConfigBits = aBitmapSlab;
    if ( iChunkSize > iSlabInitThreshold )
    {
        iSlabInitThreshold = KMaxTInt32;
        SlabConfig(aBitmapSlab);   // Delayed slab configuration done
    }
    if ( aPagePower )
    {
        RChunk chunk;
        chunk.SetHandle(iChunkHandle);
        iMemBase = chunk.Base();   // Store base address for paged allocator
    }

    /* 10-1K, 11-2K, 12-4K, 13-8K, 14-16K, 15-32K, 16-64K */
    PagedInit(aPagePower);

#ifdef ENABLE_BTRACE
    TUint32 traceData[3];
    traceData[0] = aBitmapSlab;
    traceData[1] = aPagePower;
    traceData[2] = GM->iTrimCheck;
    BTraceContextN(BTrace::ETest1, 90, (TUint32)this, 0, traceData, sizeof(traceData));
#endif
#else
    (void)aBitmapSlab;
    (void)aPagePower;
#endif // __KERNEL_MODE__

}


TInt RHybridHeap::AllocLen(const TAny* aCell) const
{
    aCell = __GET_DEBUG_DATA_BFR(aCell);

    if (PtrDiff(aCell, this) >= 0)
    {
        mchunkptr m = MEM2CHUNK(aCell);
        return CHUNKSIZE(m) - OVERHEAD_FOR(m) - __DEBUG_HDR_SIZE;
    }
#ifndef __KERNEL_MODE__
    if ( aCell )
    {
        if (LowBits(aCell, iPageSize) )
            return SlabHeaderSize(slab::SlabFor(aCell)->iHeader) - __DEBUG_HDR_SIZE;

        return PagedSize((void*)aCell) - __DEBUG_HDR_SIZE;
    }
#endif
    return 0; // NULL pointer situation, should PANIC !!
}

#ifdef __KERNEL_MODE__
TAny* RHybridHeap::Alloc(TInt aSize)
{
    __CHECK_THREAD_STATE;
    __ASSERT_ALWAYS((TUint)aSize<(KMaxTInt/2),HEAP_PANIC(ETHeapBadAllocatedCellSize));
    __SIMULATE_ALLOC_FAIL(return NULL;)
    Lock();
    __ALLOC_DEBUG_HEADER(aSize);
    TAny* addr = DlMalloc(aSize);
    if ( addr )
    {
        // iCellCount++;
        __SET_DEBUG_DATA(addr, iNestingLevel, ++iAllocCount);
        addr = __GET_USER_DATA_BFR(addr);
        __INCREMENT_COUNTERS(addr);
        memclr(addr, AllocLen(addr));
    }
    Unlock();
#ifdef ENABLE_BTRACE
    if (iFlags & ETraceAllocs)
    {
        if ( addr )
        {
            TUint32 traceData[3];
            traceData[0] = AllocLen(addr);
            traceData[1] = aSize - __DEBUG_HDR_SIZE;
            traceData[2] = 0;
            BTraceContextN(BTrace::EHeap, BTrace::EHeapAlloc, (TUint32)this, (TUint32)addr, traceData, sizeof(traceData));
        }
        else
            BTraceContext8(BTrace::EHeap, BTrace::EHeapAllocFail, (TUint32)this, (TUint32)(aSize - __DEBUG_HDR_SIZE));
    }
#endif
    return addr;
}
#else

TAny* RHybridHeap::Alloc(TInt aSize)
{
    __ASSERT_ALWAYS((TUint)aSize<(KMaxTInt/2),HEAP_PANIC(ETHeapBadAllocatedCellSize));
    __SIMULATE_ALLOC_FAIL(return NULL;)

    TAny* addr;
#ifdef ENABLE_BTRACE
    TInt aSubAllocator=0;
#endif

    Lock();

    __ALLOC_DEBUG_HEADER(aSize);

    if (aSize < iSlabThreshold)
    {
        TInt ix = iSizeMap[(aSize+3)>>2];
        HEAP_ASSERT(ix != 0xff);
        addr = SlabAllocate(iSlabAlloc[ix]);
        if ( !addr )
        {   // Slab allocation has failed, try to allocate from DL
            addr = DlMalloc(aSize);
        }
#ifdef ENABLE_BTRACE
        else
            aSubAllocator=1;
#endif
    }
    else if ((aSize >> iPageThreshold) == 0)
    {
        addr = DlMalloc(aSize);
    }
    else
    {
        addr = PagedAllocate(aSize);
        if ( !addr )
        {   // Page allocation has failed, try to allocate from DL
            addr = DlMalloc(aSize);
        }
#ifdef ENABLE_BTRACE
        else
            aSubAllocator=2;
#endif
    }

    if ( addr )
    {
        // iCellCount++;
        __SET_DEBUG_DATA(addr, iNestingLevel, ++iAllocCount);
        addr = __GET_USER_DATA_BFR(addr);
        __INCREMENT_COUNTERS(addr);
    }
    Unlock();

#ifdef ENABLE_BTRACE
    if (iFlags & ETraceAllocs)
    {
        if ( addr )
        {
            TUint32 traceData[3];
            traceData[0] = AllocLen(addr);
            traceData[1] = aSize - __DEBUG_HDR_SIZE;
            traceData[2] = aSubAllocator;
            BTraceContextN(BTrace::EHeap, BTrace::EHeapAlloc, (TUint32)this, (TUint32)addr, traceData, sizeof(traceData));
        }
        else
            BTraceContext8(BTrace::EHeap, BTrace::EHeapAllocFail, (TUint32)this, (TUint32)(aSize - __DEBUG_HDR_SIZE));
    }
#endif

    return addr;
}
#endif // __KERNEL_MODE__
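// Routing summary (illustrative; the concrete sizes are assumptions based on
// the thresholds set up in Construct() and Init()): in the user-side Alloc()
// above, a request below iSlabThreshold (e.g. a 32-byte cell) goes to the slab
// allocator, a mid-sized request (e.g. 2KB) goes to dlmalloc, and a request
// with (aSize >> iPageThreshold) != 0, i.e. >= 2^iPageThreshold (64KB with the
// default page power of 16), goes to the page allocator; dlmalloc is the
// fallback whenever the slab or page allocator fails.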

#ifndef __KERNEL_MODE__
TInt RHybridHeap::Compress()
{
    if ( IS_FIXED_HEAP )
        return 0;

    Lock();
    TInt Reduced = SysTrim(GM, 0);
    if (iSparePage)
    {
        Unmap(iSparePage, iPageSize);
        iSparePage = 0;
        Reduced += iPageSize;
    }
    Unlock();
    return Reduced;
}
#endif

void RHybridHeap::Free(TAny* aPtr)
{
    __CHECK_THREAD_STATE;
    if ( !aPtr )
        return;
#ifdef ENABLE_BTRACE
    TInt aSubAllocator=0;
#endif
    Lock();

    aPtr = __GET_DEBUG_DATA_BFR(aPtr);

#ifndef __KERNEL_MODE__
    if (PtrDiff(aPtr, this) >= 0)
    {
#endif
        __DL_BFR_CHECK(GM, aPtr);
        __DECREMENT_COUNTERS(__GET_USER_DATA_BFR(aPtr));
        __ZAP_CELL(aPtr);
        DlFree(aPtr);
#ifndef __KERNEL_MODE__
    }

    else if ( LowBits(aPtr, iPageSize) == 0 )
    {
#ifdef ENABLE_BTRACE
        aSubAllocator = 2;
#endif
        __PAGE_BFR_CHECK(aPtr);
        __DECREMENT_COUNTERS(__GET_USER_DATA_BFR(aPtr));
        PagedFree(aPtr);
    }
    else
    {
#ifdef ENABLE_BTRACE
        aSubAllocator = 1;
#endif
        TUint32 bm[4];
        __SLAB_BFR_CHECK(slab::SlabFor(aPtr),aPtr,bm);
        __DECREMENT_COUNTERS(__GET_USER_DATA_BFR(aPtr));
        __ZAP_CELL(aPtr);
        SlabFree(aPtr);
    }
#endif // __KERNEL_MODE__
    // iCellCount--;
    Unlock();
#ifdef ENABLE_BTRACE
    if (iFlags & ETraceAllocs)
    {
        TUint32 traceData;
        traceData = aSubAllocator;
        BTraceContextN(BTrace::EHeap, BTrace::EHeapFree, (TUint32)this, (TUint32)__GET_USER_DATA_BFR(aPtr), &traceData, sizeof(traceData));
    }
#endif
}

#ifndef __KERNEL_MODE__
void RHybridHeap::Reset()
/**
Frees all allocated cells on this heap.
*/
{
    Lock();
    if ( !IS_FIXED_HEAP )
    {
        if ( GM->iSeg.iSize > (iMinLength - sizeof(*this)) )
            Unmap(GM->iSeg.iBase + (iMinLength - sizeof(*this)), (GM->iSeg.iSize - (iMinLength - sizeof(*this))));
        ResetBitmap();
        if ( !iDLOnly )
            Init(iSlabConfigBits, iPageThreshold);
        else
            Init(0,0);
    }
    else Init(0,0);
    Unlock();
}
#endif

TAny* RHybridHeap::ReAllocImpl(TAny* aPtr, TInt aSize, TInt aMode)
{
    // First handle special case of calling reallocate with NULL aPtr
    if (!aPtr)
    {
        if (( aMode & ENeverMove ) == 0 )
        {
            aPtr = Alloc(aSize - __DEBUG_HDR_SIZE);
            aPtr = __GET_DEBUG_DATA_BFR(aPtr);
        }
        return aPtr;
    }

    TInt oldsize = AllocLen(__GET_USER_DATA_BFR(aPtr)) + __DEBUG_HDR_SIZE;

    // Insist on geometric growth when reallocating memory; this reduces the copying and fragmentation
    // generated during arithmetic growth of buffer/array/vector memory
    // Experiments have shown that 25% is a good threshold for this policy
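    // Worked example of the policy above (illustrative, not part of the
    // original source): for a cell with oldsize == 100 bytes, any request in
    // (75..100] returns the cell unchanged (still > 75% of the original size),
    // while a grow request in (100..125) is rounded up to
    // _ALIGN_UP(125, 4) == 128 bytes, so a sequence of small grows costs
    // O(log n) reallocations instead of O(n).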
    if (aSize <= oldsize)
    {
        if (aSize >= oldsize - (oldsize>>2))
            return aPtr;   // don't change if >75% original size
    }
    else
    {
        __SIMULATE_ALLOC_FAIL(return NULL;)
        if (aSize < oldsize + (oldsize>>2))
        {
            aSize = _ALIGN_UP(oldsize + (oldsize>>2), 4);   // grow to at least 125% original size
        }
    }
    __DEBUG_SAVE(aPtr);

    TAny* newp;
#ifdef __KERNEL_MODE__
    Lock();
    __DL_BFR_CHECK(GM, aPtr);
    newp = DlRealloc(aPtr, aSize, aMode);
    Unlock();
    if ( newp )
    {
        if ( aSize > oldsize )
            memclr(((TUint8*)newp) + oldsize, (aSize-oldsize));   // Buffer has grown in place, clear extra
        __DEBUG_RESTORE(newp);
        __UPDATE_ALLOC_COUNT(aPtr, newp, ++iAllocCount);
        __UPDATE_TOTAL_ALLOC(newp, oldsize);
    }
#else
    // Decide how to reallocate based on (a) the current cell location, (b) the mode requested and (c) the new size
    if ( PtrDiff(aPtr, this) >= 0 )
    {   // current cell in Doug Lea iArena
        if ( (aMode & ENeverMove)
             ||
             (!(aMode & EAllowMoveOnShrink) && (aSize < oldsize))
             ||
             ((aSize >= iSlabThreshold) && ((aSize >> iPageThreshold) == 0)) )
        {
            Lock();
            __DL_BFR_CHECK(GM, aPtr);
            newp = DlRealloc(aPtr, aSize, aMode);   // old and new in DL allocator
            Unlock();
            __DEBUG_RESTORE(newp);
            __UPDATE_ALLOC_COUNT(aPtr,newp, ++iAllocCount);
            __UPDATE_TOTAL_ALLOC(newp, oldsize);
            return newp;
        }
    }
    else if (LowBits(aPtr, iPageSize) == 0)
    {   // current cell in paged iArena
        if ( (aMode & ENeverMove)
             ||
             (!(aMode & EAllowMoveOnShrink) && (aSize < oldsize))
             ||
             ((aSize >> iPageThreshold) != 0) )
        {
            Lock();
            __PAGE_BFR_CHECK(aPtr);
            newp = PagedReallocate(aPtr, aSize, aMode);   // old and new in paged allocator
            Unlock();
            __DEBUG_RESTORE(newp);
            __UPDATE_ALLOC_COUNT(aPtr,newp, ++iAllocCount);
            __UPDATE_TOTAL_ALLOC(newp, oldsize);
            return newp;
        }
    }
    else
    {   // current cell in slab iArena
        TUint32 bm[4];
        Lock();
        __SLAB_BFR_CHECK(slab::SlabFor(aPtr), aPtr, bm);
        Unlock();
        if ( aSize <= oldsize)
            return aPtr;
        if (aMode & ENeverMove)
            return NULL;   // cannot grow in slab iArena
        // just use alloc/copy/free...
    }

    // fallback to allocate and copy
    // shouldn't get here if we cannot move the cell
    // __ASSERT(mode == emobile || (mode==efixshrink && size>oldsize));

    newp = Alloc(aSize - __DEBUG_HDR_SIZE);
    newp = __GET_DEBUG_DATA_BFR(newp);
    if (newp)
    {
        memcpy(newp, aPtr, oldsize<aSize ? oldsize : aSize);
        __DEBUG_RESTORE(newp);
        Free(__GET_USER_DATA_BFR(aPtr));
    }

#endif // __KERNEL_MODE__
    return newp;
}


TAny* RHybridHeap::ReAlloc(TAny* aPtr, TInt aSize, TInt aMode)
{

    aPtr = __GET_DEBUG_DATA_BFR(aPtr);
    __ALLOC_DEBUG_HEADER(aSize);

    TAny* retval = ReAllocImpl(aPtr, aSize, aMode);

    retval = __GET_USER_DATA_BFR(retval);

#ifdef ENABLE_BTRACE
    if (iFlags & ETraceAllocs)
    {
        if ( retval )
        {
            TUint32 traceData[3];
            traceData[0] = AllocLen(retval);
            traceData[1] = aSize - __DEBUG_HDR_SIZE;
            traceData[2] = (TUint32)aPtr;
            BTraceContextN(BTrace::EHeap, BTrace::EHeapReAlloc,(TUint32)this, (TUint32)retval, traceData, sizeof(traceData));
        }
        else
            BTraceContext12(BTrace::EHeap, BTrace::EHeapReAllocFail, (TUint32)this, (TUint32)aPtr, (TUint32)(aSize - __DEBUG_HDR_SIZE));
    }
#endif
    return retval;
}

#ifndef __KERNEL_MODE__
TInt RHybridHeap::Available(TInt& aBiggestBlock) const
/**
Gets the total free space currently available on the heap and the space
available in the largest free block.

Note that this function exists mainly for compatibility reasons. In a modern
heap implementation such as that present in Symbian it is not appropriate to
concern oneself with details such as the amount of free memory available on a
heap and its largest free block, because the way that a modern heap implementation
works is not simple. The amount of available virtual memory != physical memory
and there are multiple allocation strategies used internally, which makes all
memory usage figures "fuzzy" at best.

In short, if you want to see if there is enough memory available to allocate a
block of memory, call Alloc() and if it succeeds then there is enough memory!
Messing around with functions like this is somewhat pointless with modern heap
allocators.

@param aBiggestBlock On return, contains the space available in the largest
                     free block on the heap. Due to the internals of modern
                     heap implementations, you can probably still allocate a
                     block larger than this!

@return The total free space currently available on the heap. Again, you can
        probably still allocate more than this!
*/
{
    struct HeapInfo info;
    Lock();
    TInt Biggest = GetInfo(&info);
    aBiggestBlock = __GET_AVAIL_BLOCK_SIZE(Biggest);
    Unlock();
    return __GET_AVAIL_BLOCK_SIZE(info.iFreeBytes);

}

TInt RHybridHeap::AllocSize(TInt& aTotalAllocSize) const
/**
Gets the number of cells allocated on this heap, and the total space
allocated to them.

@param aTotalAllocSize On return, contains the total space allocated
                       to the cells.

@return The number of cells allocated on this heap.
*/
{
    struct HeapInfo info;
    Lock();
    GetInfo(&info);
    aTotalAllocSize = info.iAllocBytes - __REMOVE_DBG_HDR(info.iAllocN);
    Unlock();
    return info.iAllocN;
}

#endif

TInt RHybridHeap::Extension_(TUint /* aExtensionId */, TAny*& /* a0 */, TAny* /* a1 */)
{
    return KErrNotSupported;
}


///////////////////////////////////////////////////////////////////////////////
// imported from dla.cpp
///////////////////////////////////////////////////////////////////////////////

//#include <unistd.h>
//#define DEBUG_REALLOC
#ifdef DEBUG_REALLOC
#include <e32debug.h>
#endif

inline void RHybridHeap::InitBins(mstate m)
{
    /* Establish circular links for iSmallBins */
    bindex_t i;
    for (i = 0; i < NSMALLBINS; ++i) {
        sbinptr bin = SMALLBIN_AT(m,i);
        bin->iFd = bin->iBk = bin;
    }
}
/* ---------------------------- malloc support --------------------------- */

/* allocate a large request from the best fitting chunk in a treebin */
void* RHybridHeap::TmallocLarge(mstate m, size_t nb) {
    tchunkptr v = 0;
    size_t rsize = -nb; /* Unsigned negation */
    tchunkptr t;
    bindex_t idx;
    ComputeTreeIndex(nb, idx);

    if ((t = *TREEBIN_AT(m, idx)) != 0)
    {
        /* Traverse tree for this bin looking for node with size == nb */
        size_t sizebits = nb << LEFTSHIFT_FOR_TREE_INDEX(idx);
        tchunkptr rst = 0;  /* The deepest untaken right subtree */
        for (;;)
        {
            tchunkptr rt;
            size_t trem = CHUNKSIZE(t) - nb;
            if (trem < rsize)
            {
                v = t;
                if ((rsize = trem) == 0)
                    break;
            }
            rt = t->iChild[1];
            t = t->iChild[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
            if (rt != 0 && rt != t)
                rst = rt;
            if (t == 0)
            {
                t = rst; /* set t to least subtree holding sizes > nb */
                break;
            }
            sizebits <<= 1;
        }
    }
    if (t == 0 && v == 0)
    { /* set t to root of next non-empty treebin */
        binmap_t leftbits = LEFT_BITS(IDX2BIT(idx)) & m->iTreeMap;
        if (leftbits != 0)
        {
            bindex_t i;
            binmap_t leastbit = LEAST_BIT(leftbits);
            ComputeBit2idx(leastbit, i);
            t = *TREEBIN_AT(m, i);
        }
    }
    while (t != 0)
    { /* Find smallest of tree or subtree */
        size_t trem = CHUNKSIZE(t) - nb;
        if (trem < rsize) {
            rsize = trem;
            v = t;
        }
        t = LEFTMOST_CHILD(t);
    }
    /* If iDv is a better fit, return 0 so malloc will use it */
    if (v != 0 && rsize < (size_t)(m->iDvSize - nb))
    {
        if (RTCHECK(OK_ADDRESS(m, v)))
        { /* split */
            mchunkptr r = CHUNK_PLUS_OFFSET(v, nb);
            HEAP_ASSERT(CHUNKSIZE(v) == rsize + nb);
            if (RTCHECK(OK_NEXT(v, r)))
            {
                UnlinkLargeChunk(m, v);
                if (rsize < MIN_CHUNK_SIZE)
                    SET_INUSE_AND_PINUSE(m, v, (rsize + nb));
                else
                {
                    SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(m, v, nb);
                    SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
                    InsertChunk(m, r, rsize);
                }
                return CHUNK2MEM(v);
            }
        }
        // CORRUPTION_ERROR_ACTION(m);
    }
    return 0;
}

/* allocate a small request from the best fitting chunk in a treebin */
void* RHybridHeap::TmallocSmall(mstate m, size_t nb)
{
    tchunkptr t, v;
    size_t rsize;
    bindex_t i;
    binmap_t leastbit = LEAST_BIT(m->iTreeMap);
    ComputeBit2idx(leastbit, i);

    v = t = *TREEBIN_AT(m, i);
    rsize = CHUNKSIZE(t) - nb;

    while ((t = LEFTMOST_CHILD(t)) != 0)
    {
        size_t trem = CHUNKSIZE(t) - nb;
        if (trem < rsize)
        {
            rsize = trem;
            v = t;
        }
    }

    if (RTCHECK(OK_ADDRESS(m, v)))
    {
        mchunkptr r = CHUNK_PLUS_OFFSET(v, nb);
        HEAP_ASSERT(CHUNKSIZE(v) == rsize + nb);
        if (RTCHECK(OK_NEXT(v, r)))
        {
            UnlinkLargeChunk(m, v);
            if (rsize < MIN_CHUNK_SIZE)
                SET_INUSE_AND_PINUSE(m, v, (rsize + nb));
            else
            {
                SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(m, v, nb);
                SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
                ReplaceDv(m, r, rsize);
            }
            return CHUNK2MEM(v);
        }
    }
    // CORRUPTION_ERROR_ACTION(m);
    // return 0;
}

inline void RHybridHeap::InitTop(mstate m, mchunkptr p, size_t psize)
{
    /* Ensure alignment */
    size_t offset = ALIGN_OFFSET(CHUNK2MEM(p));
    p = (mchunkptr)((TUint8*)p + offset);
    psize -= offset;
    m->iTop = p;
    m->iTopSize = psize;
    p->iHead = psize | PINUSE_BIT;
    /* set size of fake trailing chunk holding overhead space only once */
    mchunkptr chunkPlusOff = CHUNK_PLUS_OFFSET(p, psize);
    chunkPlusOff->iHead = TOP_FOOT_SIZE;
    m->iTrimCheck = KHeapShrinkHysRatio*(iGrowBy>>8);
}


/* Unlink the first chunk from a smallbin */
inline void RHybridHeap::UnlinkFirstSmallChunk(mstate M,mchunkptr B,mchunkptr P,bindex_t& I)
{
    mchunkptr F = P->iFd;
    HEAP_ASSERT(P != B);
    HEAP_ASSERT(P != F);
    HEAP_ASSERT(CHUNKSIZE(P) == SMALL_INDEX2SIZE(I));
    if (B == F)
        CLEAR_SMALLMAP(M, I);
    else if (RTCHECK(OK_ADDRESS(M, F)))
    {
        B->iFd = F;
        F->iBk = B;
    }
    else
    {
        CORRUPTION_ERROR_ACTION(M);
    }
}
/* Link a free chunk into a smallbin */
inline void RHybridHeap::InsertSmallChunk(mstate M,mchunkptr P, size_t S)
{
    bindex_t I = SMALL_INDEX(S);
    mchunkptr B = SMALLBIN_AT(M, I);
    mchunkptr F = B;
    HEAP_ASSERT(S >= MIN_CHUNK_SIZE);
    if (!SMALLMAP_IS_MARKED(M, I))
        MARK_SMALLMAP(M, I);
    else if (RTCHECK(OK_ADDRESS(M, B->iFd)))
        F = B->iFd;
    else
    {
        CORRUPTION_ERROR_ACTION(M);
    }
    B->iFd = P;
    F->iBk = P;
    P->iFd = F;
    P->iBk = B;
}


inline void RHybridHeap::InsertChunk(mstate M,mchunkptr P,size_t S)
{
    if (IS_SMALL(S))
        InsertSmallChunk(M, P, S);
    else
    {
        tchunkptr TP = (tchunkptr)(P); InsertLargeChunk(M, TP, S);
    }
}

inline void RHybridHeap::UnlinkLargeChunk(mstate M,tchunkptr X)
{
    tchunkptr XP = X->iParent;
    tchunkptr R;
    if (X->iBk != X)
    {
        tchunkptr F = X->iFd;
        R = X->iBk;
        if (RTCHECK(OK_ADDRESS(M, F)))
        {
            F->iBk = R;
            R->iFd = F;
        }
        else
        {
            CORRUPTION_ERROR_ACTION(M);
        }
    }
    else
    {
        tchunkptr* RP;
        if (((R = *(RP = &(X->iChild[1]))) != 0) ||
            ((R = *(RP = &(X->iChild[0]))) != 0))
        {
            tchunkptr* CP;
            while ((*(CP = &(R->iChild[1])) != 0) ||
                   (*(CP = &(R->iChild[0])) != 0))
            {
                R = *(RP = CP);
            }
            if (RTCHECK(OK_ADDRESS(M, RP)))
                *RP = 0;
            else
            {
                CORRUPTION_ERROR_ACTION(M);
            }
        }
    }
    if (XP != 0)
    {
        tbinptr* H = TREEBIN_AT(M, X->iIndex);
        if (X == *H)
        {
            if ((*H = R) == 0)
                CLEAR_TREEMAP(M, X->iIndex);
        }
        else if (RTCHECK(OK_ADDRESS(M, XP)))
        {
            if (XP->iChild[0] == X)
                XP->iChild[0] = R;
            else
                XP->iChild[1] = R;
        }
        else
            CORRUPTION_ERROR_ACTION(M);
        if (R != 0)
        {
            if (RTCHECK(OK_ADDRESS(M, R)))
            {
                tchunkptr C0, C1;
                R->iParent = XP;
                if ((C0 = X->iChild[0]) != 0)
                {
                    if (RTCHECK(OK_ADDRESS(M, C0)))
                    {
                        R->iChild[0] = C0;
                        C0->iParent = R;
                    }
                    else
                        CORRUPTION_ERROR_ACTION(M);
                }
                if ((C1 = X->iChild[1]) != 0)
                {
                    if (RTCHECK(OK_ADDRESS(M, C1)))
                    {
                        R->iChild[1] = C1;
                        C1->iParent = R;
                    }
                    else
                        CORRUPTION_ERROR_ACTION(M);
                }
            }
            else
                CORRUPTION_ERROR_ACTION(M);
        }
    }
}

/* Unlink a chunk from a smallbin */
inline void RHybridHeap::UnlinkSmallChunk(mstate M, mchunkptr P,size_t S)
{
    mchunkptr F = P->iFd;
    mchunkptr B = P->iBk;
    bindex_t I = SMALL_INDEX(S);
    HEAP_ASSERT(P != B);
    HEAP_ASSERT(P != F);
    HEAP_ASSERT(CHUNKSIZE(P) == SMALL_INDEX2SIZE(I));
    if (F == B)
        CLEAR_SMALLMAP(M, I);
    else if (RTCHECK((F == SMALLBIN_AT(M,I) || OK_ADDRESS(M, F)) &&
                     (B == SMALLBIN_AT(M,I) || OK_ADDRESS(M, B))))
    {
        F->iBk = B;
        B->iFd = F;
    }
    else
    {
        CORRUPTION_ERROR_ACTION(M);
    }
}

inline void RHybridHeap::UnlinkChunk(mstate M, mchunkptr P, size_t S)
{
    if (IS_SMALL(S))
        UnlinkSmallChunk(M, P, S);
    else
    {
        tchunkptr TP = (tchunkptr)(P); UnlinkLargeChunk(M, TP);
    }
}

// For DL debug functions
void RHybridHeap::DoComputeTreeIndex(size_t S, bindex_t& I)
{
    ComputeTreeIndex(S, I);
}

inline void RHybridHeap::ComputeTreeIndex(size_t S, bindex_t& I)
{
    size_t X = S >> TREEBIN_SHIFT;
    if (X == 0)
        I = 0;
    else if (X > 0xFFFF)
        I = NTREEBINS-1;
    else
    {
        unsigned int Y = (unsigned int)X;
        unsigned int N = ((Y - 0x100) >> 16) & 8;
        unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
        N += K;
        N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
        K = 14 - N + ((Y <<= K) >> 15);
        I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));
    }
}
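// Illustrative note (an assumption based on the upstream dlmalloc code this is
// adapted from): the bit twiddling above is a branch-free floor(log2()) - K
// ends up as the index of the most significant set bit of X - and each
// power-of-two size range is then split into two tree bins by the next-lower
// bit of S, giving I = 2*K + bit. For example, S == 1536 gives X == 6, K == 2
// and I == 5.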

/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk into tree */
inline void RHybridHeap::InsertLargeChunk(mstate M,tchunkptr X,size_t S)
{
    tbinptr* H;
    bindex_t I;
    ComputeTreeIndex(S, I);
    H = TREEBIN_AT(M, I);
    X->iIndex = I;
    X->iChild[0] = X->iChild[1] = 0;
    if (!TREEMAP_IS_MARKED(M, I))
    {
        MARK_TREEMAP(M, I);
        *H = X;
        X->iParent = (tchunkptr)H;
        X->iFd = X->iBk = X;
    }
    else
    {
        tchunkptr T = *H;
        size_t K = S << LEFTSHIFT_FOR_TREE_INDEX(I);
        for (;;)
        {
            if (CHUNKSIZE(T) != S) {
                tchunkptr* C = &(T->iChild[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);
                K <<= 1;
                if (*C != 0)
                    T = *C;
                else if (RTCHECK(OK_ADDRESS(M, C)))
                {
                    *C = X;
                    X->iParent = T;
                    X->iFd = X->iBk = X;
                    break;
                }
                else
                {
                    CORRUPTION_ERROR_ACTION(M);
                    break;
                }
            }
            else
            {
                tchunkptr F = T->iFd;
                if (RTCHECK(OK_ADDRESS(M, T) && OK_ADDRESS(M, F)))
                {
                    T->iFd = F->iBk = X;
                    X->iFd = F;
                    X->iBk = T;
                    X->iParent = 0;
                    break;
                }
                else
                {
                    CORRUPTION_ERROR_ACTION(M);
                    break;
                }
            }
        }
    }
}

/*
Unlink steps:

1. If x is a chained node, unlink it from its same-sized iFd/iBk links
   and choose its iBk node as its replacement.
2. If x was the last node of its size, but not a leaf node, it must
   be replaced with a leaf node (not merely one with an open left or
   right), to make sure that lefts and rights of descendants
   correspond properly to bit masks. We use the rightmost descendant
   of x. We could use any other leaf, but this is easy to locate and
   tends to counteract removal of leftmosts elsewhere, and so keeps
   paths shorter than minimally guaranteed. This doesn't loop much
   because on average a node in a tree is near the bottom.
3. If x is the base of a chain (i.e., has iParent links) relink
   x's iParent and children to x's replacement (or null if none).
*/

/* Replace iDv node, binning the old one */
/* Used only when iDvSize known to be small */
inline void RHybridHeap::ReplaceDv(mstate M, mchunkptr P, size_t S)
{
    size_t DVS = M->iDvSize;
    if (DVS != 0)
    {
        mchunkptr DV = M->iDv;
        HEAP_ASSERT(IS_SMALL(DVS));
        InsertSmallChunk(M, DV, DVS);
    }
    M->iDvSize = S;
    M->iDv = P;
}


inline void RHybridHeap::ComputeBit2idx(binmap_t X,bindex_t& I)
{
    unsigned int Y = X - 1;
    unsigned int K = Y >> (16-4) & 16;
    unsigned int N = K;        Y >>= K;
    N += K = Y >> (8-3) & 8;   Y >>= K;
    N += K = Y >> (4-2) & 4;   Y >>= K;
    N += K = Y >> (2-1) & 2;   Y >>= K;
    N += K = Y >> (1-0) & 1;   Y >>= K;
    I = (bindex_t)(N + Y);
}
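// Illustrative note (not in the original source): X is expected to be a single
// set bit here (isolated with LEAST_BIT), and the staged shifts binary-search
// Y = X-1 for its position, e.g. ComputeBit2idx(0x10, i) yields i == 4.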


int RHybridHeap::SysTrim(mstate m, size_t pad)
{
    size_t extra = 0;

    if ( IS_INITIALIZED(m) )
    {
        pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */

        if (m->iTopSize > pad)
        {
            extra = Floor(m->iTopSize - pad, iPageSize);
            if ( (m->iSeg.iSize - extra) < (iMinLength - sizeof(*this)) )
            {
                if ( m->iSeg.iSize > (iMinLength - sizeof(*this)) )
                    extra = Floor(m->iSeg.iSize - (iMinLength - sizeof(*this)), iPageSize); /* do not shrink heap below min length */
                else extra = 0;
            }

            if ( extra )
            {
                Unmap(m->iSeg.iBase + m->iSeg.iSize - extra, extra);

                m->iSeg.iSize -= extra;
                InitTop(m, m->iTop, m->iTopSize - extra);
                CHECK_TOP_CHUNK(m, m->iTop);
            }
        }

    }

    return extra;
}

/* Get memory from system using MORECORE */

void* RHybridHeap::SysAlloc(mstate m, size_t nb)
{
    HEAP_ASSERT(m->iTop);
    /* Subtract out existing available iTop space from MORECORE request. */
    // size_t asize = _ALIGN_UP(nb - m->iTopSize + TOP_FOOT_SIZE + SIZE_T_ONE, iGrowBy);
    TInt asize = _ALIGN_UP(nb - m->iTopSize + SYS_ALLOC_PADDING, iGrowBy); // From DLA version 2.8.4

    char* br = (char*)Map(m->iSeg.iBase+m->iSeg.iSize, asize);
    if (!br)
        return 0;
    HEAP_ASSERT(br == (char*)m->iSeg.iBase+m->iSeg.iSize);

    /* Merge with an existing segment */
    m->iSeg.iSize += asize;
    InitTop(m, m->iTop, m->iTopSize + asize);

    if (nb < m->iTopSize)
    { /* Allocate from new or extended iTop space */
        size_t rsize = m->iTopSize -= nb;
        mchunkptr p = m->iTop;
        mchunkptr r = m->iTop = CHUNK_PLUS_OFFSET(p, nb);
        r->iHead = rsize | PINUSE_BIT;
        SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(m, p, nb);
        CHECK_TOP_CHUNK(m, m->iTop);
        CHECK_MALLOCED_CHUNK(m, CHUNK2MEM(p), nb);
        return CHUNK2MEM(p);
    }

    return 0;
}


void RHybridHeap::InitDlMalloc(size_t capacity, int /*locked*/)
{
    memset(GM,0,sizeof(malloc_state));
    // The maximum amount that can be allocated can be calculated as:-
    // 2^sizeof(size_t) - sizeof(malloc_state) - TOP_FOOT_SIZE - page Size(all accordingly padded)
    // If the capacity exceeds this, no allocation will be done.
    GM->iSeg.iBase = iBase;
    GM->iSeg.iSize = capacity;
    InitBins(GM);
    InitTop(GM, (mchunkptr)iBase, capacity - TOP_FOOT_SIZE);
}

void* RHybridHeap::DlMalloc(size_t bytes)
{
    /*
    Basic algorithm:
    If a small request (< 256 bytes minus per-chunk overhead):
        1. If one exists, use a remainderless chunk in associated smallbin.
           (Remainderless means that there are too few excess bytes to
           represent as a chunk.)
        2. If it is big enough, use the iDv chunk, which is normally the
           chunk adjacent to the one used for the most recent small request.
        3. If one exists, split the smallest available chunk in a bin,
           saving remainder in iDv.
        4. If it is big enough, use the iTop chunk.
        5. If available, get memory from system and use it
    Otherwise, for a large request:
        1. Find the smallest available binned chunk that fits, and use it
           if it is better fitting than iDv chunk, splitting if necessary.
        2. If better fitting than any binned chunk, use the iDv chunk.
        3. If it is big enough, use the iTop chunk.
        4. If request size >= mmap threshold, try to directly mmap this chunk.
        5. If available, get memory from system and use it
    */
    void* mem;
    size_t nb;
    if (bytes <= MAX_SMALL_REQUEST)
    {
        bindex_t idx;
        binmap_t smallbits;
        nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : PAD_REQUEST(bytes);
        idx = SMALL_INDEX(nb);
        smallbits = GM->iSmallMap >> idx;

        if ((smallbits & 0x3U) != 0)
        { /* Remainderless fit to a smallbin. */
            mchunkptr b, p;
            idx += ~smallbits & 1;     /* Uses next bin if idx empty */
            b = SMALLBIN_AT(GM, idx);
            p = b->iFd;
            HEAP_ASSERT(CHUNKSIZE(p) == SMALL_INDEX2SIZE(idx));
            UnlinkFirstSmallChunk(GM, b, p, idx);
            SET_INUSE_AND_PINUSE(GM, p, SMALL_INDEX2SIZE(idx));
            mem = CHUNK2MEM(p);
            CHECK_MALLOCED_CHUNK(GM, mem, nb);
            return mem;
        }

        else if (nb > GM->iDvSize)
        {
            if (smallbits != 0)
            { /* Use chunk in next nonempty smallbin */
                mchunkptr b, p, r;
                size_t rsize;
                bindex_t i;
                binmap_t leftbits = (smallbits << idx) & LEFT_BITS(IDX2BIT(idx));
                binmap_t leastbit = LEAST_BIT(leftbits);
                ComputeBit2idx(leastbit, i);
                b = SMALLBIN_AT(GM, i);
                p = b->iFd;
                HEAP_ASSERT(CHUNKSIZE(p) == SMALL_INDEX2SIZE(i));
                UnlinkFirstSmallChunk(GM, b, p, i);
                rsize = SMALL_INDEX2SIZE(i) - nb;
                /* Fit here cannot be remainderless if 4byte sizes */
                if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
                    SET_INUSE_AND_PINUSE(GM, p, SMALL_INDEX2SIZE(i));
                else
                {
                    SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(GM, p, nb);
                    r = CHUNK_PLUS_OFFSET(p, nb);
                    SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
                    ReplaceDv(GM, r, rsize);
                }
                mem = CHUNK2MEM(p);
                CHECK_MALLOCED_CHUNK(GM, mem, nb);
                return mem;
            }

            else if (GM->iTreeMap != 0 && (mem = TmallocSmall(GM, nb)) != 0)
            {
                CHECK_MALLOCED_CHUNK(GM, mem, nb);
                return mem;
            }
        }
    }
    else if (bytes >= MAX_REQUEST)
        nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else
    {
        nb = PAD_REQUEST(bytes);
        if (GM->iTreeMap != 0 && (mem = TmallocLarge(GM, nb)) != 0)
        {
            CHECK_MALLOCED_CHUNK(GM, mem, nb);
            return mem;
        }
    }

    if (nb <= GM->iDvSize)
    {
        size_t rsize = GM->iDvSize - nb;
        mchunkptr p = GM->iDv;
        if (rsize >= MIN_CHUNK_SIZE)
        { /* split iDv */
            mchunkptr r = GM->iDv = CHUNK_PLUS_OFFSET(p, nb);
            GM->iDvSize = rsize;
            SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
            SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(GM, p, nb);
        }
        else
        { /* exhaust iDv */
            size_t dvs = GM->iDvSize;
            GM->iDvSize = 0;
            GM->iDv = 0;
            SET_INUSE_AND_PINUSE(GM, p, dvs);
        }
        mem = CHUNK2MEM(p);
        CHECK_MALLOCED_CHUNK(GM, mem, nb);
        return mem;
    }

    else if (nb < GM->iTopSize)
    { /* Split iTop */
        size_t rsize = GM->iTopSize -= nb;
        mchunkptr p = GM->iTop;
        mchunkptr r = GM->iTop = CHUNK_PLUS_OFFSET(p, nb);
        r->iHead = rsize | PINUSE_BIT;
        SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(GM, p, nb);
        mem = CHUNK2MEM(p);
        CHECK_TOP_CHUNK(GM, GM->iTop);
        CHECK_MALLOCED_CHUNK(GM, mem, nb);
        return mem;
    }

    return SysAlloc(GM, nb);
}


void RHybridHeap::DlFree(void* mem)
{
    /*
    Consolidate freed chunks with preceding or succeeding bordering
    free chunks, if they exist, and then place in a bin. Intermixed
    with special cases for iTop, iDv, mmapped chunks, and usage errors.
    */
    mchunkptr p = MEM2CHUNK(mem);
    CHECK_INUSE_CHUNK(GM, p);
    if (RTCHECK(OK_ADDRESS(GM, p) && OK_CINUSE(p)))
    {
        size_t psize = CHUNKSIZE(p);
        mchunkptr next = CHUNK_PLUS_OFFSET(p, psize);
        if (!PINUSE(p))
        {
            size_t prevsize = p->iPrevFoot;
            mchunkptr prev = CHUNK_MINUS_OFFSET(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(OK_ADDRESS(GM, prev)))
            { /* consolidate backward */
                if (p != GM->iDv)
                {
                    UnlinkChunk(GM, p, prevsize);
                }
                else if ((next->iHead & INUSE_BITS) == INUSE_BITS)
                {
                    GM->iDvSize = psize;
                    SET_FREE_WITH_PINUSE(p, psize, next);
                    return;
                }
            }
            else
            {
                USAGE_ERROR_ACTION(GM, p);
                return;
            }
        }

        if (RTCHECK(OK_NEXT(p, next) && OK_PINUSE(next)))
        {
            if (!CINUSE(next))
            { /* consolidate forward */
                if (next == GM->iTop)
                {
                    size_t tsize = GM->iTopSize += psize;
                    GM->iTop = p;
                    p->iHead = tsize | PINUSE_BIT;
                    if (p == GM->iDv)
                    {
                        GM->iDv = 0;
                        GM->iDvSize = 0;
                    }
                    if ( !IS_FIXED_HEAP && SHOULD_TRIM(GM, tsize) )
                        SysTrim(GM, 0);
                    return;
                }
                else if (next == GM->iDv)
                {
                    size_t dsize = GM->iDvSize += psize;
                    GM->iDv = p;
                    SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, dsize);
                    return;
                }
                else
                {
                    size_t nsize = CHUNKSIZE(next);
                    psize += nsize;
                    UnlinkChunk(GM, next, nsize);
                    SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, psize);
                    if (p == GM->iDv)
                    {
                        GM->iDvSize = psize;
                        return;
                    }
                }
            }
            else
                SET_FREE_WITH_PINUSE(p, psize, next);
            InsertChunk(GM, p, psize);
            CHECK_FREE_CHUNK(GM, p);
            return;
        }
    }
}
|
|
1688 |
|
|
1689 |
|
|
1690 |
void* RHybridHeap::DlRealloc(void* oldmem, size_t bytes, TInt mode)
{
    mchunkptr oldp = MEM2CHUNK(oldmem);
    size_t oldsize = CHUNKSIZE(oldp);
    mchunkptr next = CHUNK_PLUS_OFFSET(oldp, oldsize);
    mchunkptr newp = 0;
    void* extra = 0;

    /* Try to either shrink or extend into iTop. Else malloc-copy-free */

    if (RTCHECK(OK_ADDRESS(GM, oldp) && OK_CINUSE(oldp) &&
                OK_NEXT(oldp, next) && OK_PINUSE(next)))
    {
        size_t nb = REQUEST2SIZE(bytes);
        if (oldsize >= nb)
        { /* already big enough */
            size_t rsize = oldsize - nb;
            newp = oldp;
            if (rsize >= MIN_CHUNK_SIZE)
            {
                mchunkptr remainder = CHUNK_PLUS_OFFSET(newp, nb);
                SET_INUSE(GM, newp, nb);
                // SET_INUSE(GM, remainder, rsize);
                SET_INUSE_AND_PINUSE(GM, remainder, rsize); // corrected in original DLA version V2.8.4
                extra = CHUNK2MEM(remainder);
            }
        }
        else if (next == GM->iTop && oldsize + GM->iTopSize > nb)
        {
            /* Expand into iTop */
            size_t newsize = oldsize + GM->iTopSize;
            size_t newtopsize = newsize - nb;
            mchunkptr newtop = CHUNK_PLUS_OFFSET(oldp, nb);
            SET_INUSE(GM, oldp, nb);
            newtop->iHead = newtopsize | PINUSE_BIT;
            GM->iTop = newtop;
            GM->iTopSize = newtopsize;
            newp = oldp;
        }
    }
    else
    {
        USAGE_ERROR_ACTION(GM, oldmem);
    }

    if (newp != 0)
    {
        if (extra != 0)
        {
            DlFree(extra);
        }
        CHECK_INUSE_CHUNK(GM, newp);
        return CHUNK2MEM(newp);
    }
    else
    {
        if ( mode & ENeverMove )
            return 0; // cannot move
        void* newmem = DlMalloc(bytes);
        if (newmem != 0)
        {
            size_t oc = oldsize - OVERHEAD_FOR(oldp);
            memcpy(newmem, oldmem, (oc < bytes) ? oc : bytes);
            DlFree(oldmem);
        }
        return newmem;
    }
    // return 0;
}

size_t RHybridHeap::DlInfo(struct HeapInfo* i, SWalkInfo* wi) const
{
    TInt max = ((GM->iTopSize-1) & ~CHUNK_ALIGN_MASK) - CHUNK_OVERHEAD;
    if ( max < 0 )
        max = 0;
    else ++i->iFreeN; // iTop always free
    i->iFreeBytes += max;

    Walk(wi, GM->iTop, max, EGoodFreeCell, EDougLeaAllocator); // Introduce DL iTop buffer to the walk function

    for (mchunkptr q = ALIGN_AS_CHUNK(GM->iSeg.iBase); q != GM->iTop; q = NEXT_CHUNK(q))
    {
        TInt sz = CHUNKSIZE(q);
        if (!CINUSE(q))
        {
            if ( sz > max )
                max = sz;
            i->iFreeBytes += sz;
            ++i->iFreeN;
            Walk(wi, CHUNK2MEM(q), sz, EGoodFreeCell, EDougLeaAllocator); // Introduce DL free buffer to the walk function
        }
        else
        {
            i->iAllocBytes += sz - CHUNK_OVERHEAD;
            ++i->iAllocN;
            Walk(wi, CHUNK2MEM(q), (sz - CHUNK_OVERHEAD), EGoodAllocatedCell, EDougLeaAllocator); // Introduce DL allocated buffer to the walk function
        }
    }
    return max; // return largest available chunk size
}

//
// get statistics about the state of the allocator
//
TInt RHybridHeap::GetInfo(struct HeapInfo* i, SWalkInfo* wi) const
{
    memset(i, 0, sizeof(HeapInfo));
    i->iFootprint = iChunkSize;
    i->iMaxSize = iMaxLength;
#ifndef __KERNEL_MODE__
    PagedInfo(i, wi);
    SlabInfo(i, wi);
#endif
    return DlInfo(i, wi);
}

//
// Methods to commit/decommit memory pages from the chunk
//

void* RHybridHeap::Map(void* p, TInt sz)
//
// Allocate pages in the chunk.
// If p is NULL, find and allocate the required number of pages (which must lie in the lower half);
// otherwise commit the pages specified.
//
{
    HEAP_ASSERT(sz > 0);

    if ( iChunkSize + sz > iMaxLength)
        return 0;

#ifdef __KERNEL_MODE__

    TInt r = ((DChunk*)iChunkHandle)->Adjust(iChunkSize + iOffset + sz);
    if (r < 0)
        return 0;

    iChunkSize += sz;

#else

    RChunk chunk;
    chunk.SetHandle(iChunkHandle);
    if ( p )
    {
        TInt r;
        if ( iUseAdjust )
            r = chunk.Adjust(iChunkSize + sz);
        else
        {
            HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
            HEAP_ASSERT(p == Floor(p, iPageSize));
            r = chunk.Commit(iOffset + PtrDiff(p, this), sz);
        }
        if (r < 0)
            return 0;
    }
    else
    {
        TInt r = chunk.Allocate(sz);
        if (r < 0)
            return 0;
        if (r > iOffset)
        {
            // can't allow page allocations in the DL zone
            chunk.Decommit(r, sz);
            return 0;
        }
        p = Offset(this, r - iOffset);
    }
    iChunkSize += sz;

    if (iChunkSize >= iSlabInitThreshold)
    { // set up the slab system now that the heap is large enough
        SlabConfig(iSlabConfigBits);
        iSlabInitThreshold = KMaxTInt32;
    }

#endif // __KERNEL_MODE__

#ifdef ENABLE_BTRACE
    if (iChunkSize > iHighWaterMark)
    {
        iHighWaterMark = Ceiling(iChunkSize, 16*iPageSize);
        TUint32 traceData[6];
        traceData[0] = iChunkHandle;
        traceData[1] = iMinLength;
        traceData[2] = iMaxLength;
        traceData[3] = sz;
        traceData[4] = iChunkSize;
        traceData[5] = iHighWaterMark;
        BTraceContextN(BTrace::ETest1, 90, (TUint32)this, 33, traceData, sizeof(traceData));
    }
#endif

    return p;
}

void RHybridHeap::Unmap(void* p, TInt sz)
{
    HEAP_ASSERT(sz > 0);

#ifdef __KERNEL_MODE__

    (void)p;
    HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
#if defined(_DEBUG)
    TInt r =
#endif
    ((DChunk*)iChunkHandle)->Adjust(iChunkSize + iOffset - sz);
    HEAP_ASSERT(r >= 0);

#else

    RChunk chunk;
    chunk.SetHandle(iChunkHandle);
    if ( iUseAdjust )
    {
        HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
#if defined(_DEBUG)
        TInt r =
#endif
        chunk.Adjust(iChunkSize - sz);
        HEAP_ASSERT(r >= 0);
    }
    else
    {
        HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
        HEAP_ASSERT(p == Floor(p, iPageSize));
#if defined(_DEBUG)
        TInt r =
#endif
        chunk.Decommit(PtrDiff(p, Offset(this, -iOffset)), sz);
        HEAP_ASSERT(r >= 0);
    }
#endif // __KERNEL_MODE__

    iChunkSize -= sz;
}


#ifndef __KERNEL_MODE__
//
// Slab allocator code
//

//inline slab* slab::SlabFor(void* p)
slab* slab::SlabFor( const void* p)
{
    return (slab*)(Floor(p, SLABSIZE));
}

//
// Remove slab s from its tree/heap (not necessarily the root), preserving the address order
// invariant of the heap
//
void RHybridHeap::TreeRemove(slab* s)
{
    slab** r = s->iParent;
    slab* c1 = s->iChild1;
    slab* c2 = s->iChild2;
    for (;;)
    {
        if (!c2)
        {
            *r = c1;
            if (c1)
                c1->iParent = r;
            return;
        }
        if (!c1)
        {
            *r = c2;
            c2->iParent = r;
            return;
        }
        if (c1 > c2)
        {
            slab* c3 = c1;
            c1 = c2;
            c2 = c3;
        }
        slab* newc2 = c1->iChild2;
        *r = c1;
        c1->iParent = r;
        c1->iChild2 = c2;
        c2->iParent = &c1->iChild2;
        s = c1;
        c1 = s->iChild1;
        c2 = newc2;
        r = &s->iChild1;
    }
}
//
// Insert slab s into the tree/heap rooted at r, preserving the address ordering
// invariant of the heap
//
void RHybridHeap::TreeInsert(slab* s, slab** r)
{
    slab* n = *r;
    for (;;)
    {
        if (!n)
        { // tree empty
            *r = s;
            s->iParent = r;
            s->iChild1 = s->iChild2 = 0;
            break;
        }
        if (s < n)
        { // insert between iParent and n
            *r = s;
            s->iParent = r;
            s->iChild1 = n;
            s->iChild2 = 0;
            n->iParent = &s->iChild1;
            break;
        }
        slab* c1 = n->iChild1;
        slab* c2 = n->iChild2;
        if ((c1 - 1) > (c2 - 1))
        {
            r = &n->iChild1;
            n = c1;
        }
        else
        {
            r = &n->iChild2;
            n = c2;
        }
    }
}

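/*
 Editorial note (not in the original source): the comparison
 (c1 - 1) > (c2 - 1) above is deliberate. Subtracting one element from a
 NULL slab pointer wraps it round to the highest possible address, so a
 missing child always compares as "largest" and the descent steers into an
 empty slot when one exists, while still ordering two real children by
 address.
*/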
void* RHybridHeap::AllocNewSlab(slabset& allocator)
//
// Acquire and initialise a new slab, returning a cell from the slab.
// The strategy is:
// 1. Use the lowest-address free slab, if available. This is done by using the lowest slab
//    in the page at the root of the iPartialPage heap (which is address ordered). If the page
//    is now fully used, remove it from the iPartialPage heap.
// 2. Allocate a new page for iSlabs if no empty iSlabs are available.
//
{
    page* p = page::PageFor(iPartialPage);
    if (!p)
        return AllocNewPage(allocator);

    unsigned h = p->iSlabs[0].iHeader;
    unsigned pagemap = SlabHeaderPagemap(h);
    HEAP_ASSERT(&p->iSlabs[HIBIT(pagemap)] == iPartialPage);

    unsigned slabix = LOWBIT(pagemap);
    p->iSlabs[0].iHeader = h &~ (0x100<<slabix);
    if (!(pagemap &~ (1<<slabix)))
    {
        TreeRemove(iPartialPage); // last free slab in page
    }

    return InitNewSlab(allocator, &p->iSlabs[slabix]);
}

/* The definition of this function is not present in the prototype code. */
#if 0
void RHybridHeap::partial_insert(slab* s)
{
    // slab has had first cell freed and needs to be linked back into iPartial tree
    slabset& ss = iSlabAlloc[iSizeMap[s->clz]];

    HEAP_ASSERT(s->used == slabfull);
    s->used = ss.fulluse - s->clz; // full-1 loading
    TreeInsert(s, &ss.iPartial);
    CHECKTREE(&ss.iPartial);
}
#endif

void* RHybridHeap::AllocNewPage(slabset& allocator)
//
// Acquire and initialise a new page, returning a cell from a new slab.
// The iPartialPage tree is empty (otherwise we'd have used a slab from there).
// The iPartialPage link is put in the highest addressed slab in the page, and the
// lowest addressed slab is used to fulfill the allocation request.
//
{
    page* p = iSparePage;
    if (p)
        iSparePage = 0;
    else
    {
        p = static_cast<page*>(Map(0, iPageSize));
        if (!p)
            return 0;
    }
    HEAP_ASSERT(p == Floor(p, iPageSize));
    // Store page allocated for slab into paged_bitmap (for RHybridHeap::Reset())
    if (!PagedSetSize(p, iPageSize))
    {
        Unmap(p, iPageSize);
        return 0;
    }
    p->iSlabs[0].iHeader = ((1<<3) + (1<<2) + (1<<1))<<8; // set pagemap
    p->iSlabs[3].iParent = &iPartialPage;
    p->iSlabs[3].iChild1 = p->iSlabs[3].iChild2 = 0;
    iPartialPage = &p->iSlabs[3];
    return InitNewSlab(allocator, &p->iSlabs[0]);
}

void RHybridHeap::FreePage(page* p)
//
// Release an unused page to the OS.
// A single page is cached for reuse, to reduce thrashing
// of the OS allocator.
//
{
    HEAP_ASSERT(Ceiling(p, iPageSize) == p);
    if (!iSparePage)
    {
        iSparePage = p;
        return;
    }

    // an unmapped slab page must be cleared from the paged_bitmap, too
    PagedZapSize(p, iPageSize); // clear page map

    Unmap(p, iPageSize);
}

void RHybridHeap::FreeSlab(slab* s)
//
// Release an empty slab to the slab manager.
// The strategy is:
// 1. The page containing the slab is checked to see the state of the other iSlabs in the page by
//    inspecting the pagemap field in the iHeader of the first slab in the page.
// 2. The pagemap is updated to indicate the new unused slab.
// 3. If this is the only unused slab in the page then the slab iHeader is used to add the page to
//    the iPartialPage tree/heap.
// 4. If all the iSlabs in the page are now unused, the page is released back to the OS.
// 5. If this slab has a higher address than the one currently used to track this page in
//    the iPartialPage heap, the linkage is moved to the new unused slab.
//
{
    TreeRemove(s);
    CHECKTREE(s->iParent);
    HEAP_ASSERT(SlabHeaderUsedm4(s->iHeader) == SlabHeaderSize(s->iHeader)-4);

    page* p = page::PageFor(s);
    unsigned h = p->iSlabs[0].iHeader;
    int slabix = s - &p->iSlabs[0];
    unsigned pagemap = SlabHeaderPagemap(h);
    p->iSlabs[0].iHeader = h | (0x100<<slabix);
    if (pagemap == 0)
    { // page was full before, use this slab as the link in the empty heap
        TreeInsert(s, &iPartialPage);
    }
    else
    { // find the current empty-link slab
        slab* sl = &p->iSlabs[HIBIT(pagemap)];
        pagemap ^= (1<<slabix);
        if (pagemap == 0xf)
        { // page is now empty so recycle the page to the OS
            TreeRemove(sl);
            FreePage(p);
            return;
        }
        // ensure the free-list link is in the highest addressed slab in the page
        if (s > sl)
        { // replace the current link with the new one. Address-ordered tree, so its position stays the same
            slab** r = sl->iParent;
            slab* c1 = sl->iChild1;
            slab* c2 = sl->iChild2;
            s->iParent = r;
            s->iChild1 = c1;
            s->iChild2 = c2;
            *r = s;
            if (c1)
                c1->iParent = &s->iChild1;
            if (c2)
                c2->iParent = &s->iChild2;
        }
        CHECK(if (s < sl) s = sl);
    }
    HEAP_ASSERT(SlabHeaderPagemap(p->iSlabs[0].iHeader) != 0);
    HEAP_ASSERT(HIBIT(SlabHeaderPagemap(p->iSlabs[0].iHeader)) == unsigned(s - &p->iSlabs[0]));
}


void RHybridHeap::SlabInit()
{
    iSlabThreshold = 0;
    iPartialPage = 0;
    iFullSlab = 0;
    iSparePage = 0;
    memset(&iSizeMap[0], 0xff, sizeof(iSizeMap));
    memset(&iSlabAlloc[0], 0, sizeof(iSlabAlloc));
}

void RHybridHeap::SlabConfig(unsigned slabbitmap)
{
    HEAP_ASSERT((slabbitmap & ~EOkBits) == 0);
    HEAP_ASSERT(MAXSLABSIZE <= 60);

    unsigned int ix = 0xff;
    unsigned int bit = 1 << ((MAXSLABSIZE>>2)-1);
    for (int sz = MAXSLABSIZE; sz >= 0; sz -= 4, bit >>= 1)
    {
        if (slabbitmap & bit)
        {
            if (ix == 0xff)
                iSlabThreshold = sz+1;
            ix = (sz>>2)-1;
        }
        iSizeMap[sz>>2] = (TUint8)ix;
    }
}

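/*
 Worked example (editorial illustration, not part of the original source).
 Each bit in slabbitmap enables one slab cell size: bit (1<<((sz>>2)-1))
 corresponds to size sz. Suppose slabbitmap enables only the 8, 16 and 24
 byte slabs, i.e. slabbitmap = (1<<1)|(1<<3)|(1<<5) = 0x2A. Walking sz from
 MAXSLABSIZE down to 0 then yields:

   sz=24 : first configured size seen, so iSlabThreshold = 25,
           ix = (24>>2)-1 = 5,  iSizeMap[6] = 5
   sz=20 : iSizeMap[5] = 5   (20-byte requests round up to the 24-byte slab)
   sz=16 : ix = 3,  iSizeMap[4] = 3
   sz=12 : iSizeMap[3] = 3
   sz=8  : ix = 1,  iSizeMap[2] = 1
   sz=4  : iSizeMap[1] = 1
   sz=0  : iSizeMap[0] = 1

 Entries above the largest configured size stay 0xff, so those requests fall
 through to the other allocators; iSlabThreshold = 25 means only requests of
 24 bytes or less are eligible for slab allocation.
*/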
void* RHybridHeap::SlabAllocate(slabset& ss)
//
// Allocate a cell from the given slabset.
// Strategy:
// 1. Take the partially full slab at the iTop of the heap (lowest address).
// 2. If there is no such slab, allocate from a new slab.
// 3. If the slab has a non-empty freelist, pop the cell from the front of the list and update the slab.
// 4. Otherwise, if the slab is not full, return the cell at the end of the currently used region of
//    the slab, updating the slab.
// 5. Otherwise, release the slab from the iPartial tree/heap, marking it as 'floating', and go back to
//    step 1.
//
{
    for (;;)
    {
        slab* s = ss.iPartial;
        if (!s)
            break;
        unsigned h = s->iHeader;
        unsigned free = h & 0xff; // extract the free cell position
        if (free)
        {
            HEAP_ASSERT(((free<<2)-sizeof(slabhdr))%SlabHeaderSize(h) == 0);
            void* p = Offset(s, free<<2);
            free = *(unsigned char*)p; // get next pos in free list
            h += (h&0x3C000)<<6; // update usedm4
            h &= ~0xff;
            h |= free; // update freelist
            s->iHeader = h;
            HEAP_ASSERT(SlabHeaderFree(h) == 0 || ((SlabHeaderFree(h)<<2)-sizeof(slabhdr))%SlabHeaderSize(h) == 0);
            HEAP_ASSERT(SlabHeaderUsedm4(h) <= 0x3F8u);
            HEAP_ASSERT((SlabHeaderUsedm4(h)+4)%SlabHeaderSize(h) == 0);
            return p;
        }
        unsigned h2 = h + ((h&0x3C000)<<6);
        // if (h2 < 0xfc00000)
        if (h2 < MAXUSEDM4BITS)
        {
            HEAP_ASSERT((SlabHeaderUsedm4(h2)+4)%SlabHeaderSize(h2) == 0);
            s->iHeader = h2;
            return Offset(s, (h>>18) + sizeof(unsigned) + sizeof(slabhdr));
        }
        h |= FLOATING_BIT; // mark the slab as full-floating
        s->iHeader = h;
        TreeRemove(s);
        slab* c = iFullSlab; // add to full list
        iFullSlab = s;
        s->iParent = &iFullSlab;
        s->iChild1 = c;
        s->iChild2 = 0;
        if (c)
            c->iParent = &s->iChild1;

        CHECKTREE(&ss.iPartial);
        // go back and try the next slab...
    }
    // no iPartial iSlabs found, so allocate from a new slab
    return AllocNewSlab(ss);
}

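/*
 Slab header layout as implied by the bit manipulation above (an editorial
 reading, worked out from this file's expressions rather than from a
 separate specification):

   bits  0..7  : word offset of the first free cell ("freelist head");
                 the byte offset is (h & 0xff) << 2
   bits  8..11 : pagemap of free slabs (meaningful in iSlabs[0] of a page)
   bits 14..17 : cell size / 4, so (h & 0x3C000) >> 12 is the size in bytes
   bits 18..   : "usedm4", bytes in use minus 4; adding (h & 0x3C000) << 6
                 bumps this field by exactly one cell size
   bit  31     : FLOATING_BIT, set while a full slab is parked on iFullSlab

 For example, a freshly initialised 24-byte slab (see InitNewSlab below)
 has h = (24<<12) | (20<<18): size 24, one cell in use, empty freelist.
*/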
void RHybridHeap::SlabFree(void* p)
//
// Free a cell from the slab allocator.
// Strategy:
// 1. Find the containing slab (round down to the nearest 1KB boundary).
// 2. Push the cell onto the slab's freelist, and update the slab usage count.
// 3. If this was the last allocated cell, free the slab to the main slab manager.
// 4. If the slab was full-floating then insert the slab into its respective iPartial tree.
//
{
    HEAP_ASSERT(LowBits(p, 3) == 0);
    slab* s = slab::SlabFor(p);
    CHECKSLAB(s, ESlabAllocator, p);
    CHECKSLABBFR(s, p);

    unsigned pos = LowBits(p, SLABSIZE);
    unsigned h = s->iHeader;
    HEAP_ASSERT(SlabHeaderUsedm4(h) != 0x3fC); // slab is empty already
    HEAP_ASSERT((pos-sizeof(slabhdr))%SlabHeaderSize(h) == 0);
    *(unsigned char*)p = (unsigned char)h;
    h &= ~0xFF;
    h |= (pos>>2);
    unsigned size = h & 0x3C000;
    if (int(h) >= 0)
    {
        h -= size<<6;
        if (int(h) >= 0)
        {
            s->iHeader = h;
            return;
        }
        FreeSlab(s);
        return;
    }
    h -= size<<6;
    h &= ~FLOATING_BIT;
    s->iHeader = h;
    slab** full = s->iParent; // remove from full list
    slab* c = s->iChild1;
    *full = c;
    if (c)
        c->iParent = full;

    slabset& ss = iSlabAlloc[iSizeMap[size>>14]];
    TreeInsert(s, &ss.iPartial);
    CHECKTREE(&ss.iPartial);
}

void* RHybridHeap::InitNewSlab(slabset& allocator, slab* s)
//
// Initialise an empty slab for this allocator and return the first cell.
// Pre-condition: the slabset has no iPartial iSlabs for allocation.
//
{
    HEAP_ASSERT(allocator.iPartial == 0);
    TInt size = 4 + ((&allocator - &iSlabAlloc[0]) << 2); // infer the size from the slab allocator address
    unsigned h = s->iHeader & 0xF00; // preserve pagemap only
    h |= (size<<12); // set size
    h |= (size-4)<<18; // set usedminus4 to one object minus 4
    s->iHeader = h;
    allocator.iPartial = s;
    s->iParent = &allocator.iPartial;
    s->iChild1 = s->iChild2 = 0;
    return Offset(s, sizeof(slabhdr));
}

const unsigned char slab_bitcount[16] = {0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4};

const unsigned char slab_ext_frag[16] =
{
    0,
    16 + (1008 % 4),
    16 + (1008 % 8),
    16 + (1008 % 12),
    16 + (1008 % 16),
    16 + (1008 % 20),
    16 + (1008 % 24),
    16 + (1008 % 28),
    16 + (1008 % 32),
    16 + (1008 % 36),
    16 + (1008 % 40),
    16 + (1008 % 44),
    16 + (1008 % 48),
    16 + (1008 % 52),
    16 + (1008 % 56),
    16 + (1008 % 60)
};

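/*
 Editorial note (not in the original source): slab_ext_frag[size>>2] is the
 number of bytes in a 1KB slab that can never hold cells of the given size -
 the 16-byte slab header plus the tail left over from the 1008 usable bytes.
 For example, for 24-byte cells: 16 + (1008 % 24) = 16 + 0 = 16 wasted bytes,
 i.e. 42 cells fit exactly; for 40-byte cells: 16 + (1008 % 40) = 24, leaving
 room for 25 cells.
*/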
void RHybridHeap::TreeWalk(slab* const* root, void (*f)(slab*, struct HeapInfo*, SWalkInfo*), struct HeapInfo* i, SWalkInfo* wi)
{
    // iterative walk around the tree at root

    slab* s = *root;
    if (!s)
        return;

    for (;;)
    {
        slab* c;
        while ((c = s->iChild1) != 0)
            s = c; // walk down left side to end
        for (;;)
        {
            f(s, i, wi);
            c = s->iChild2;
            if (c)
            { // one step down right side, now try and walk down left
                s = c;
                break;
            }
            for (;;)
            { // loop to walk up right side
                slab** pp = s->iParent;
                if (pp == root)
                    return;
                s = slab::SlabFor(pp);
                if (pp == &s->iChild1)
                    break;
            }
        }
    }
}

void RHybridHeap::SlabEmptyInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi)
{
    Walk(wi, s, SLABSIZE, EGoodFreeCell, EEmptySlab); // Introduce an empty slab to the walk function
    int nslab = slab_bitcount[SlabHeaderPagemap(page::PageFor(s)->iSlabs[0].iHeader)];
    i->iFreeN += nslab;
    i->iFreeBytes += nslab << SLABSHIFT;
}

void RHybridHeap::SlabPartialInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi)
{
    Walk(wi, s, SLABSIZE, EGoodAllocatedCell, EPartialFullSlab); // Introduce a partially full slab to the walk function
    unsigned h = s->iHeader;
    unsigned used = SlabHeaderUsedm4(h)+4;
    unsigned size = SlabHeaderSize(h);
    unsigned free = 1024 - slab_ext_frag[size>>2] - used;
    i->iFreeN += (free/size);
    i->iFreeBytes += free;
    i->iAllocN += (used/size);
    i->iAllocBytes += used;
}

void RHybridHeap::SlabFullInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi)
{
    Walk(wi, s, SLABSIZE, EGoodAllocatedCell, EFullSlab); // Introduce a full slab to the walk function
    unsigned h = s->iHeader;
    unsigned used = SlabHeaderUsedm4(h)+4;
    unsigned size = SlabHeaderSize(h);
    HEAP_ASSERT(1024 - slab_ext_frag[size>>2] - used == 0);
    i->iAllocN += (used/size);
    i->iAllocBytes += used;
}

void RHybridHeap::SlabInfo(struct HeapInfo* i, SWalkInfo* wi) const
{
    if (iSparePage)
    {
        i->iFreeBytes += iPageSize;
        i->iFreeN = 4;
        Walk(wi, iSparePage, iPageSize, EGoodFreeCell, ESlabSpare); // Introduce the slab spare page to the walk function
    }
    TreeWalk(&iFullSlab, &SlabFullInfo, i, wi);
    for (int ix = 0; ix < (MAXSLABSIZE>>2); ++ix)
        TreeWalk(&iSlabAlloc[ix].iPartial, &SlabPartialInfo, i, wi);
    TreeWalk(&iPartialPage, &SlabEmptyInfo, i, wi);
}


//
// Bitmap class implementation for the large page allocator
//
inline unsigned char* paged_bitmap::Addr() const {return iBase;}
inline unsigned paged_bitmap::Size() const {return iNbits;}
//

void paged_bitmap::Init(unsigned char* p, unsigned size, unsigned bit)
{
    iBase = p;
    iNbits = size;
    int bytes = Ceiling(size, 8) >> 3;
    memset(p, bit ? 0xff : 0, bytes);
}

inline void paged_bitmap::Set(unsigned ix, unsigned bit)
{
    if (bit)
        iBase[ix>>3] |= (1<<(ix&7));
    else
        iBase[ix>>3] &= ~(1<<(ix&7));
}

inline unsigned paged_bitmap::operator[](unsigned ix) const
{
    return 1U & (iBase[ix>>3] >> (ix&7));
}

void paged_bitmap::Setn(unsigned ix, unsigned len, unsigned bit)
{
    int l = len;
    while (--l >= 0)
        Set(ix++, bit);
}

void paged_bitmap::Set(unsigned ix, unsigned len, unsigned val)
{
    int l = len;
    while (--l >= 0)
    {
        Set(ix++, val&1);
        val >>= 1;
    }
}

unsigned paged_bitmap::Bits(unsigned ix, unsigned len) const
{
    int l = len;
    unsigned val = 0;
    unsigned bit = 0;
    while (--l >= 0)
        val |= (*this)[ix++] << bit++;
    return val;
}

bool paged_bitmap::Is(unsigned ix, unsigned len, unsigned bit) const
{
    unsigned i2 = ix+len;
    if (i2 > iNbits)
        return false;
    for (;;)
    {
        if ((*this)[ix] != bit)
            return false;
        if (++ix == i2)
            return true;
    }
}

int paged_bitmap::Find(unsigned start, unsigned bit) const
{
    if (start < iNbits) do
    {
        if ((*this)[start] == bit)
            return start;
    } while (++start < iNbits);
    return -1;
}

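/*
 Usage sketch (editorial illustration, not part of the original source):
 the bitmap stores multi-bit values LSB-first, so Set(ix, len, val) and
 Bits(ix, len) round-trip:

   unsigned char store[4];
   paged_bitmap bm;
   bm.Init(store, 32, 0);       // 32 bits, all cleared
   bm.Set(6, 5, 0x15);          // writes 1,0,1,0,1 (LSB first) at bit 6
   unsigned v  = bm.Bits(6, 5); // reads back 0x15
   int      ix = bm.Find(0, 1); // first set bit: 6
   bool     ok = bm.Is(7, 1, 0);// bit 7 is 0 (bit 1 of 0x15 is clear)
*/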
//
// Page allocator code
//
void RHybridHeap::PagedInit(TInt aPagePower)
{
    if (aPagePower > 0)
    {
        if (aPagePower < MINPAGEPOWER)
            aPagePower = MINPAGEPOWER;
    }
    else aPagePower = 31;

    iPageThreshold = aPagePower;
    /*-------------------------------------------------------------
     * Initialize the page bitmap
     *-------------------------------------------------------------*/
    iPageMap.Init((unsigned char*)&iBitMapBuffer, MAXSMALLPAGEBITS, 0);
}

void* RHybridHeap::PagedAllocate(unsigned size)
{
    TInt nbytes = Ceiling(size, iPageSize);
    void* p = Map(0, nbytes);
    if (!p)
        return 0;
    if (!PagedSetSize(p, nbytes))
    {
        Unmap(p, nbytes);
        return 0;
    }
    return p;
}

void* RHybridHeap::PagedReallocate(void* p, unsigned size, TInt mode)
{
    HEAP_ASSERT(Ceiling(p, iPageSize) == p);
    unsigned nbytes = Ceiling(size, iPageSize);

    unsigned osize = PagedSize(p);
    if ( nbytes == 0 ) // special case to handle shrinking below the minimum page threshold
        nbytes = Min((1 << MINPAGEPOWER), osize);

    if (osize == nbytes)
        return p;

    if (nbytes < osize)
    { // shrink in place, unmap the final pages and rewrite the pagemap
        Unmap(Offset(p, nbytes), osize-nbytes);
        // zap the old length code and then write the new one (will not fail)
        PagedZapSize(p, osize);

        TBool check = PagedSetSize(p, nbytes);
        __ASSERT_ALWAYS(check, HEAP_PANIC(ETHeapBadCellAddress));

        return p;
    }

    // nbytes > osize
    // try to extend the current region first

    void* newp = Map(Offset(p, osize), nbytes-osize);
    if (newp)
    { // in-place growth. There is a possibility that the pagemap may have to grow AND that this then fails
        if (!PagedSetSize(p, nbytes))
        { // must release the extra mapping
            Unmap(Offset(p, osize), nbytes-osize);
            return 0;
        }
        // if successful, the new length code will have overwritten the old one (it is at least as long)
        return p;
    }

    // fall back to allocate/copy/free
    if (mode & ENeverMove)
        return 0; // not allowed to move the cell

    newp = PagedAllocate(nbytes);
    if (!newp)
        return 0;
    memcpy(newp, p, osize);
    PagedFree(p);
    return newp;
}

void RHybridHeap::PagedFree(void* p)
{
    HEAP_ASSERT(Ceiling(p, iPageSize) == p);

    unsigned size = PagedSize(p);

    PagedZapSize(p, size); // clear page map
    Unmap(p, size);
}

void RHybridHeap::PagedInfo(struct HeapInfo* i, SWalkInfo* wi) const
{
    for (int ix = 0; (ix = iPageMap.Find(ix, 1)) >= 0;)
    {
        int npage = PagedDecode(ix);
        // Introduce the paged buffer to the walk function
        TAny* bfr = Bitmap2addr(ix);
        int len = npage << PAGESHIFT;
        if ( len > iPageSize )
        { // a buffer no larger than one page must be a slab page mapped into the bitmap, so skip it
            i->iAllocBytes += len;
            ++i->iAllocN;
            Walk(wi, bfr, len, EGoodAllocatedCell, EPageAllocator);
        }
        ix += (npage<<1);
    }
}

void RHybridHeap::ResetBitmap()
/*---------------------------------------------------------
 * Go through the paged_bitmap and unmap all buffers to the system.
 * This method is called from RHybridHeap::Reset() to unmap every page-
 * allocated buffer - and the slab pages stored in the bitmap, too
 *---------------------------------------------------------*/
{
    unsigned iNbits = iPageMap.Size();
    if ( iNbits )
    {
        for (int ix = 0; (ix = iPageMap.Find(ix, 1)) >= 0;)
        {
            int npage = PagedDecode(ix);
            void* p = Bitmap2addr(ix);
            unsigned size = PagedSize(p);
            PagedZapSize(p, size); // clear page map
            Unmap(p, size);
            ix += (npage<<1);
        }
        if ( (TInt)iNbits > MAXSMALLPAGEBITS )
        {
            // unmap the page(s) reserved for the enlarged bitmap
            Unmap(iPageMap.Addr(), (iNbits >> 3) );
        }
    }
}

TBool RHybridHeap::CheckBitmap(void* aBfr, TInt aSize, TUint32& aDummy, TInt& aNPages)
/*---------------------------------------------------------
 * If aBfr == NULL,
 * go through the paged_bitmap, count the pages of every buffer,
 * and assure, by reading the first word of each page of every buffer,
 * that the buffers are still accessible;
 * else
 * assure that the specified buffer is mapped with the correct length in
 * the page map
 *---------------------------------------------------------*/
{
    TBool ret;
    if ( aBfr )
    {
        __ASSERT_ALWAYS((Ceiling(aBfr, iPageSize) == aBfr), HEAP_PANIC(ETHeapBadCellAddress));
        ret = ( aSize == (TInt)PagedSize(aBfr));
    }
    else
    {
        ret = ETrue;
        unsigned iNbits = iPageMap.Size();
        if ( iNbits )
        {
            TInt npage;
            aNPages = 0;
            for (int ix = 0; (ix = iPageMap.Find(ix, 1)) >= 0;)
            {
                npage = PagedDecode(ix);
                aNPages += npage;
                void* p = Bitmap2addr(ix);
                __ASSERT_ALWAYS((Ceiling(p, iPageSize) == p), HEAP_PANIC(ETHeapBadCellAddress));
                unsigned s = PagedSize(p);
                __ASSERT_ALWAYS((Ceiling(s, iPageSize) == s), HEAP_PANIC(ETHeapBadCellAddress));
                while ( s )
                {
                    aDummy += *(TUint32*)((TUint8*)p + (s-iPageSize));
                    s -= iPageSize;
                }
                ix += (npage<<1);
            }
            if ( (TInt)iNbits > MAXSMALLPAGEBITS )
            {
                // add the enlarged bitmap page(s) to the total page count
                npage = (iNbits >> 3);
                __ASSERT_ALWAYS((Ceiling(npage, iPageSize) == npage), HEAP_PANIC(ETHeapBadCellAddress));
                aNPages += (npage / iPageSize);
            }
        }
    }

    return ret;
}


// The paged allocations are tracked in a bitmap which has 2 bits per page;
// this allows us to store allocations as small as 4KB.
// The presence and size of an allocation is encoded as follows:
// let N = number of pages in the allocation, then
// 10        : N = 1      // 4KB
// 110n      : N = 2 + n  // 8-12KB
// 1110nnnn  : N = nnnn   // 16-60KB
// 1111n[18] : N = n[18]  // 64KB-1GB

const struct etab { unsigned char offset, len, codelen, code;} encode_table[] =
{
    {1,2,2,0x1},
    {2,4,3,0x3},
    {0,8,4,0x7},
    {0,22,4,0xf}
};

// Return the code length for the specified allocation Size (assumed to be aligned to pages)
inline unsigned paged_codelen(unsigned size, unsigned pagesz)
{
    HEAP_ASSERT(size == Ceiling(size, pagesz));

    if (size == pagesz)
        return 2;
    else if (size < 4*pagesz)
        return 4;
    else if (size < 16*pagesz)
        return 8;
    else
        return 22;
}

inline const etab& paged_coding(unsigned npage)
{
    if (npage < 4)
        return encode_table[npage>>1];
    else if (npage < 16)
        return encode_table[2];
    else
        return encode_table[3];
}

bool RHybridHeap::PagedEncode(unsigned pos, unsigned npage)
{
    const etab& e = paged_coding(npage);
    if (pos + e.len > iPageMap.Size())
    {
        // Need to grow the page bitmap to fit the cell length into the map.
        // If we outgrow the original bitmap buffer in the RHybridHeap metadata, just get enough pages to cover the full space:
        // * the initial 68 byte bitmap maps (68*8*4KB)/2 = 1.1MB
        // * a 4KB bitmap page can map (4096*8*4KB)/2 = 64MB
        unsigned maxsize = Ceiling(iMaxLength, iPageSize);
        unsigned mapbits = maxsize >> (PAGESHIFT-1);
        maxsize = Ceiling(mapbits>>3, iPageSize);
        void* newb = Map(0, maxsize);
        if (!newb)
            return false;

        unsigned char* oldb = iPageMap.Addr();
        iPageMap.Init((unsigned char*)newb, (maxsize<<3), 0);
        memcpy(newb, oldb, Ceiling(MAXSMALLPAGEBITS,8)>>3);
    }
    // encode the allocation block size into the bitmap, starting at the bit for the start page
    unsigned bits = e.code;
    bits |= (npage - e.offset) << e.codelen;
    iPageMap.Set(pos, e.len, bits);
    return true;
}

unsigned RHybridHeap::PagedDecode(unsigned pos) const
{
    __ASSERT_ALWAYS(pos + 2 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));

    unsigned bits = iPageMap.Bits(pos, 2);
    __ASSERT_ALWAYS(bits & 1, HEAP_PANIC(ETHeapBadCellAddress));
    bits >>= 1;
    if (bits == 0)
        return 1;
    __ASSERT_ALWAYS(pos + 4 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
    bits = iPageMap.Bits(pos+2, 2);
    if ((bits & 1) == 0)
        return 2 + (bits>>1);
    else if ((bits>>1) == 0)
    {
        __ASSERT_ALWAYS(pos + 8 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
        return iPageMap.Bits(pos+4, 4);
    }
    else
    {
        __ASSERT_ALWAYS(pos + 22 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
        return iPageMap.Bits(pos+4, 18);
    }
}

inline void RHybridHeap::PagedZapSize(void* p, unsigned size)
{ iPageMap.Setn(PtrDiff(p, iMemBase) >> (PAGESHIFT-1), paged_codelen(size, iPageSize), 0); }

inline unsigned RHybridHeap::PagedSize(void* p) const
{ return PagedDecode(PtrDiff(p, iMemBase) >> (PAGESHIFT-1)) << PAGESHIFT; }

inline bool RHybridHeap::PagedSetSize(void* p, unsigned size)
{ return PagedEncode(PtrDiff(p, iMemBase) >> (PAGESHIFT-1), size >> PAGESHIFT); }

inline void* RHybridHeap::Bitmap2addr(unsigned pos) const
{ return iMemBase + (1 << (PAGESHIFT-1))*pos; }

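/*
 Worked example (editorial illustration, not part of the original source),
 assuming PAGESHIFT = 12, i.e. 4KB pages. Two bits of the map cover each
 page, so a buffer at byte offset 12*4096 from iMemBase starts at bit
 position pos = (12*4096) >> 11 = 24.

 Encoding a 5-page (20KB) allocation: paged_coding(5) selects
 {offset=0, len=8, codelen=4, code=0x7}, so
   bits = 0x7 | (5 << 4) = 0x57
 and iPageMap.Set(pos, 8, 0x57) writes, LSB first: 1,1,1,0, 1,0,1,0 -
 the "1110nnnn" form with nnnn = 5.

 Decoding reverses this: Bits(pos,2) = 3 ("11..."), Bits(pos+2,2) = 1
 ("..10"), so PagedDecode returns Bits(pos+4,4) = 5, and
 PagedSize() = 5 << PAGESHIFT = 20KB.
*/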
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/**
Constructor where the minimum and maximum length of the heap can be defined.
It defaults the chunk heap to be created to use a new local chunk,
to have a grow-by value of KMinHeapGrowBy, to be unaligned, not to be
single threaded and not to have any mode flags set.

@param aMinLength The minimum length of the heap to be created.
@param aMaxLength The maximum length to which the heap to be created can grow.
                  If the supplied value is less than a page size, then it
                  is discarded and the page size is used instead.
*/
EXPORT_C TChunkHeapCreateInfo::TChunkHeapCreateInfo(TInt aMinLength, TInt aMaxLength) :
    iVersionNumber(EVersion0), iMinLength(aMinLength), iMaxLength(aMaxLength),
    iAlign(0), iGrowBy(1), iSingleThread(EFalse),
    iOffset(0), iPaging(EUnspecified), iMode(0), iName(NULL)
{
}

/**
Sets the chunk heap to create a new chunk with the specified name.

This overrides any previous call to TChunkHeapCreateInfo::SetNewChunkHeap() or
TChunkHeapCreateInfo::SetExistingChunkHeap() for this TChunkHeapCreateInfo object.

@param aName The name to be given to the chunk heap to be created.
             If NULL, the function constructs a local chunk to host the heap.
             If not NULL, a pointer to a descriptor containing the name to be
             assigned to the global chunk hosting the heap.
*/
EXPORT_C void TChunkHeapCreateInfo::SetCreateChunk(const TDesC* aName)
{
    iName = (TDesC*)aName;
    iChunk.SetHandle(KNullHandle);
}

/**
Sets the chunk heap to be created to use the chunk specified.

This overrides any previous call to TChunkHeapCreateInfo::SetNewChunkHeap() or
TChunkHeapCreateInfo::SetExistingChunkHeap() for this TChunkHeapCreateInfo object.

@param aChunk A handle to the chunk to use for the heap.
*/
EXPORT_C void TChunkHeapCreateInfo::SetUseChunk(const RChunk aChunk)
{
    iName = NULL;
    iChunk = aChunk;
}

EXPORT_C RHeap* UserHeap::FixedHeap(TAny* aBase, TInt aMaxLength, TInt aAlign, TBool aSingleThread)
/**
Creates a fixed length heap at a specified location.

On successful return from this function, the heap is ready to use. This assumes that
the memory pointed to by aBase is mapped and able to be used. You must ensure that you
pass in a large enough value for aMaxLength. Passing in a value that is too small to
hold the metadata for the heap (~1 KB) will result in the size being rounded up and the
heap thereby running over the end of the memory assigned to it. But then, if you were to
pass in such a small value, you would not be able to do any allocations from the
heap anyway. Moral of the story: use a sensible value for aMaxLength!

@param aBase         A pointer to the location where the heap is to be constructed.
@param aMaxLength    The maximum length in bytes to which the heap can grow. If the
                     supplied value is too small to hold the heap's metadata, it
                     will be increased.
@param aAlign        From Symbian^4 onwards, this value is ignored but EABI 8
                     byte alignment is guaranteed for all allocations 8 bytes or
                     more in size. 4 byte allocations will be aligned to a 4
                     byte boundary. Best to pass in zero.
@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
                     This will cause internal locks to be created, guaranteeing
                     thread safety.

@return A pointer to the new heap, or NULL if the heap could not be created.

@panic USER 56 if aMaxLength is negative.
*/
{
    __ASSERT_ALWAYS( aMaxLength>=0, ::Panic(ETHeapMaxLengthNegative));
    if ( aMaxLength < (TInt)sizeof(RHybridHeap) )
        aMaxLength = sizeof(RHybridHeap);

    RHybridHeap* h = new(aBase) RHybridHeap(aMaxLength, aAlign, aSingleThread);

    if (!aSingleThread)
    {
        TInt r = h->iLock.CreateLocal();
        if (r != KErrNone)
            return NULL; // No need to delete the RHybridHeap instance as the new above is only a placement new
        h->iHandles = (TInt*)&h->iLock;
        h->iHandleCount = 1;
    }
    return h;
}

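/*
 Usage sketch (editorial illustration, not part of the original source):
 placing a fixed heap into memory the caller has already mapped, here the
 base of a local chunk it owns. The sizes are arbitrary example values and
 the RChunk calls are standard user-side API, assumed rather than taken
 from this file.

   RChunk chunk;
   if (chunk.CreateLocal(0x10000, 0x10000) == KErrNone)
   {
       // single-threaded use, so pass ETrue and no lock is created
       RHeap* heap = UserHeap::FixedHeap(chunk.Base(), 0x10000, 0, ETrue);
       if (heap)
       {
           TAny* cell = heap->Alloc(32);
           heap->Free(cell);
       }
       chunk.Close();
   }
*/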
/**
Creates a chunk heap of the type specified by the parameter aCreateInfo.

@param aCreateInfo A reference to a TChunkHeapCreateInfo object specifying the
                   type of chunk heap to create.

@return A pointer to the new heap or NULL if the heap could not be created.

@panic USER 41 if the heap's specified minimum length is greater than the specified maximum length.
@panic USER 55 if the heap's specified minimum length is negative.
@panic USER 172 if the heap's specified alignment is not a power of 2 or is less than the size of a TAny*.
*/
EXPORT_C RHeap* UserHeap::ChunkHeap(const TChunkHeapCreateInfo& aCreateInfo)
{
    // aCreateInfo must have been configured to use a new chunk or an existing chunk.
    __ASSERT_ALWAYS(!(aCreateInfo.iMode & (TUint32)~EChunkHeapMask), ::Panic(EHeapCreateInvalidMode));
    RHeap* h = NULL;

    if (aCreateInfo.iChunk.Handle() == KNullHandle)
    {
        // A new chunk is to be created for this heap.

        __ASSERT_ALWAYS(aCreateInfo.iMinLength >= 0, ::Panic(ETHeapMinLengthNegative));
        __ASSERT_ALWAYS(aCreateInfo.iMaxLength >= aCreateInfo.iMinLength, ::Panic(ETHeapCreateMaxLessThanMin));

        TInt maxLength = aCreateInfo.iMaxLength;
        TInt page_size;
        GET_PAGE_SIZE(page_size);

        if (maxLength < page_size)
            maxLength = page_size;

        TChunkCreateInfo chunkInfo;
#if USE_HYBRID_HEAP
        if ( aCreateInfo.iOffset )
            chunkInfo.SetNormal(0, maxLength); // create a DL-only heap
        else
        {
            maxLength = 2*maxLength;
            chunkInfo.SetDisconnected(0, 0, maxLength); // create a hybrid heap
        }
#else
        chunkInfo.SetNormal(0, maxLength); // create a DL-only heap
#endif
        chunkInfo.SetOwner((aCreateInfo.iSingleThread) ? EOwnerThread : EOwnerProcess);
        if (aCreateInfo.iName)
            chunkInfo.SetGlobal(*aCreateInfo.iName);
        // Set the paging attributes of the chunk.
        if (aCreateInfo.iPaging == TChunkHeapCreateInfo::EPaged)
            chunkInfo.SetPaging(TChunkCreateInfo::EPaged);
        if (aCreateInfo.iPaging == TChunkHeapCreateInfo::EUnpaged)
            chunkInfo.SetPaging(TChunkCreateInfo::EUnpaged);
        // Create the chunk.
        RChunk chunk;
        if (chunk.Create(chunkInfo) != KErrNone)
            return NULL;
        // Create the heap using the new chunk.
        TUint mode = aCreateInfo.iMode | EChunkHeapDuplicate; // must duplicate the handle
        h = OffsetChunkHeap(chunk, aCreateInfo.iMinLength, aCreateInfo.iOffset,
                            aCreateInfo.iGrowBy, maxLength, aCreateInfo.iAlign,
                            aCreateInfo.iSingleThread, mode);
        chunk.Close();
    }
    else
    {
        h = OffsetChunkHeap(aCreateInfo.iChunk, aCreateInfo.iMinLength, aCreateInfo.iOffset,
                            aCreateInfo.iGrowBy, aCreateInfo.iMaxLength, aCreateInfo.iAlign,
                            aCreateInfo.iSingleThread, aCreateInfo.iMode);
    }
    return h;
}

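/*
 Usage sketch (editorial illustration, not part of the original source):
 creating a heap in a new, unnamed local chunk via the aCreateInfo overload.
 The setters used below (SetCreateChunk, SetGrowBy, SetSingleThread) are
 exercised by the TDesC* overload that follows; the sizes are arbitrary
 example values, and Alloc/Free/Close are the standard RAllocator calls.

   TChunkHeapCreateInfo info(0x1000, 0x100000); // min 4KB, max 1MB
   info.SetCreateChunk(NULL);    // host the heap in a new local chunk
   info.SetGrowBy(0x1000);
   info.SetSingleThread(EFalse); // multi-threaded: locks will be created
   RHeap* heap = UserHeap::ChunkHeap(info);
   if (heap)
   {
       TAny* cell = heap->Alloc(64);
       heap->Free(cell);
       heap->Close(); // releases the heap and its handle to the chunk
   }
*/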
EXPORT_C RHeap* UserHeap::ChunkHeap(const TDesC* aName, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread)
/**
Creates a heap in a local or global chunk.

The chunk hosting the heap can be local or global.

A local chunk is one which is private to the process creating it and is not
intended for access by other user processes. A global chunk is one which is
visible to all processes.

The hosting chunk is local if the pointer aName is NULL; otherwise the
hosting chunk is global and the descriptor *aName is assumed to contain
the name to be assigned to it.

Ownership of the host chunk is vested in the current process.

A minimum and a maximum size for the heap can be specified. On successful
return from this function, the size of the heap is at least aMinLength.
If subsequent requests for allocation of memory from the heap cannot be
satisfied by compressing the heap, the size of the heap is extended in
increments of aGrowBy until the request can be satisfied. Attempts to extend
the heap cause the size of the host chunk to be adjusted.

Note that the size of the heap cannot be adjusted by more than aMaxLength.

@param aName         If NULL, the function constructs a local chunk to host
                     the heap. If not NULL, a pointer to a descriptor containing
                     the name to be assigned to the global chunk hosting the heap.
@param aMinLength    The minimum length of the heap in bytes. This will be
                     rounded up to the nearest page size by the allocator.
@param aMaxLength    The maximum length in bytes to which the heap can grow. This
                     will be rounded up to the nearest page size by the allocator.
@param aGrowBy       The number of bytes by which the heap will grow when more
                     memory is required. This will be rounded up to the nearest
                     page size by the allocator. If a value is not explicitly
                     specified, the page size is taken by default.
@param aAlign        From Symbian^4 onwards, this value is ignored but EABI 8
                     byte alignment is guaranteed for all allocations 8 bytes or
                     more in size. 4 byte allocations will be aligned to a 4
                     byte boundary. Best to pass in zero.
@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
                     This will cause internal locks to be created, guaranteeing
                     thread safety.

@return A pointer to the new heap or NULL if the heap could not be created.

@panic USER 41 if aMaxLength is < aMinLength.
@panic USER 55 if aMinLength is negative.
@panic USER 56 if aMaxLength is negative.
*/
{
    TInt page_size;
    GET_PAGE_SIZE(page_size);
    TInt minLength = _ALIGN_UP(aMinLength, page_size);
    TInt maxLength = Max(aMaxLength, minLength);

    TChunkHeapCreateInfo createInfo(minLength, maxLength);
    createInfo.SetCreateChunk(aName);
    createInfo.SetGrowBy(aGrowBy);
    createInfo.SetAlignment(aAlign);
    createInfo.SetSingleThread(aSingleThread);

    return ChunkHeap(createInfo);
}

EXPORT_C RHeap* UserHeap::ChunkHeap(RChunk aChunk, TInt aMinLength, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
/**
Creates a heap in an existing chunk.

This function is intended to be used to create a heap in a user writable code
chunk as created by a call to RChunk::CreateLocalCode(). This type of heap can
be used to hold code fragments from a JIT compiler.

@param aChunk        The chunk that will host the heap.
@param aMinLength    The minimum length of the heap in bytes. This will be
                     rounded up to the nearest page size by the allocator.
@param aGrowBy       The number of bytes by which the heap will grow when more
                     memory is required. This will be rounded up to the nearest
                     page size by the allocator. If a value is not explicitly
                     specified, the page size is taken by default.
@param aMaxLength    The maximum length in bytes to which the heap can grow. This
                     will be rounded up to the nearest page size by the allocator.
                     If 0 is passed in, the maximum length of the chunk is used.
@param aAlign        From Symbian^4 onwards, this value is ignored but EABI 8
                     byte alignment is guaranteed for all allocations 8 bytes or
                     more in size. 4 byte allocations will be aligned to a 4
                     byte boundary. Best to pass in zero.
@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
                     This will cause internal locks to be created, guaranteeing
                     thread safety.
@param aMode         Flags controlling the heap creation. See RAllocator::TFlags.

@return A pointer to the new heap or NULL if the heap could not be created.

@see UserHeap::OffsetChunkHeap()
*/
{
    return OffsetChunkHeap(aChunk, aMinLength, 0, aGrowBy, aMaxLength, aAlign, aSingleThread, aMode);
}

EXPORT_C RHeap* UserHeap::OffsetChunkHeap(RChunk aChunk, TInt aMinLength, TInt aOffset, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
|
|
3094 |
/**
|
|
3095 |
Creates a heap in an existing chunk, offset from the beginning of the chunk.
|
|
3096 |
|
|
3097 |
This function is intended to be used to create a heap using a chunk which has
|
|
3098 |
some of its memory already used, at the start of that that chunk. The maximum
|
|
3099 |
length to which the heap can grow is the maximum size of the chunk, minus the
|
|
3100 |
data at the start of the chunk.
|
|
3101 |
|
|
3102 |
The offset at which to create the heap is passed in as the aOffset parameter.
|
|
3103 |
Legacy heap implementations always respected the aOffset value, however more
|
|
3104 |
modern heap implementations are more sophisticated and cannot necessarily respect
|
|
3105 |
this value. Therefore, if possible, you should always use an aOffset of 0 unless
|
|
3106 |
you have a very explicit requirement for using a non zero value. Using a non zero
|
|
3107 |
value will result in a less efficient heap algorithm being used in order to respect
|
|
3108 |
the offset.
|
|
3109 |
|
|
3110 |
Another issue to consider when using this function is the type of the chunk passed
|
|
3111 |
in. In order for the most efficient heap algorithms to be used, the chunk passed
|
|
3112 |
in should always be a disconnected chunk. Passing in a non disconnected chunk will
|
|
3113 |
again result in a less efficient heap algorithm being used.
|
|
3114 |
|
|
3115 |
Finally, another requirement for the most efficient heap algorithms to be used is
|
|
3116 |
for the heap to be able to expand. Therefore, unless you have a specific reason to
|
|
3117 |
do so, always specify aMaxLength > aMinLength.
|
|
3118 |
|
|
3119 |
So, if possible, use aOffset == zero, aMaxLength > aMinLength and a disconnected
|
|
3120 |
chunk for best results!

@param aChunk        The chunk that will host the heap.
@param aMinLength    The minimum length of the heap in bytes. This will be
                     rounded up to the nearest page size by the allocator.
@param aOffset       The offset in bytes from the start of the chunk at which to
                     create the heap. If used (and it shouldn't really be!)
                     then it will be rounded up to a multiple of 8, to respect
                     EABI 8 byte alignment requirements.
@param aGrowBy       The number of bytes by which the heap will grow when more
                     memory is required. This will be rounded up to the nearest
                     page size by the allocator. If no value is explicitly
                     specified, the page size is used by default.
@param aMaxLength    The maximum length in bytes to which the heap can grow. This
                     will be rounded up to the nearest page size by the allocator.
                     If 0 is passed in, the maximum length of the chunk is used.
@param aAlign        From Symbian^4 onwards, this value is ignored, but EABI 8
                     byte alignment is guaranteed for all allocations 8 bytes or
                     more in size. 4 byte allocations will be aligned to a 4
                     byte boundary. Best to pass in zero.
@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
                     This will cause internal locks to be created, guaranteeing
                     thread safety.
@param aMode         Flags controlling the heap creation. See RAllocator::TFlags.

@return A pointer to the new heap or NULL if the heap could not be created.

@panic USER 41 if aMaxLength is less than aMinLength.
@panic USER 55 if aMinLength is negative.
@panic USER 56 if aMaxLength is negative.
@panic USER 168 if aOffset is negative.
*/
	{
	TBool dlOnly = EFalse;
	TInt pageSize;
	GET_PAGE_SIZE(pageSize);
	TInt align = RHybridHeap::ECellAlignment; // Always use EABI 8 byte alignment

	__ASSERT_ALWAYS(aMinLength>=0, ::Panic(ETHeapMinLengthNegative));
	__ASSERT_ALWAYS(aMaxLength>=0, ::Panic(ETHeapMaxLengthNegative));

	if ( aMaxLength > 0 )
		__ASSERT_ALWAYS(aMaxLength>=aMinLength, ::Panic(ETHeapCreateMaxLessThanMin));

	// Stick to EABI alignment for the start offset, if any
	aOffset = _ALIGN_UP(aOffset, align);

	// Using an aOffset > 0 means that we can't use the hybrid allocator and have to revert to Doug Lea only
	if (aOffset > 0)
		dlOnly = ETrue;

	// Ensure that the minimum length is enough to hold the RHybridHeap object itself
	TInt minCell = _ALIGN_UP(Max((TInt)RHybridHeap::EAllocCellSize, (TInt)RHybridHeap::EFreeCellSize), align);
	TInt hybridHeapSize = (sizeof(RHybridHeap) + minCell);
	if (aMinLength < hybridHeapSize)
		aMinLength = hybridHeapSize;

	// Round the minimum length up to a multiple of the page size, taking into account that the
	// offset takes up a part of the chunk's memory
	aMinLength = _ALIGN_UP((aMinLength + aOffset), pageSize);
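	// E.g. (assuming a 4KB page size): aMinLength == 0x1800 with aOffset == 0x100
	// rounds to _ALIGN_UP(0x1900, 0x1000) == 0x2000. The values are illustrative only.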

	// If aMaxLength is 0 then use the entire chunk
	TInt chunkSize = aChunk.MaxSize();
	if (aMaxLength == 0)
		{
		aMaxLength = chunkSize;
		}
	// Otherwise round the maximum length up to a multiple of the page size, taking into account that
	// the offset takes up a part of the chunk's memory. We also clip the maximum length to the chunk
	// size, so the user may get a little less than requested if the chunk size is not large enough
	else
		{
		aMaxLength = _ALIGN_UP((aMaxLength + aOffset), pageSize);
		if (aMaxLength > chunkSize)
			aMaxLength = chunkSize;
		}

	// If the rounded up values don't make sense then a crazy aMinLength or aOffset must have been passed
	// in, so fail the heap creation
	if (aMinLength > aMaxLength)
		return NULL;

	// Adding the offset into the minimum and maximum length was only necessary for ensuring a good fit of
	// the heap into the chunk. Re-adjust them now back to sizes that are not offset relative
	aMinLength -= aOffset;
	aMaxLength -= aOffset;

	// If we are still creating the hybrid allocator (i.e. aOffset is 0 and aMaxLength > aMinLength),
	// we must halve aMaxLength and set the offset to point to the middle of the chunk
	TInt offset = aOffset;
	TInt maxLength = aMaxLength;
	if (!dlOnly && (aMaxLength > aMinLength))
		maxLength = offset = _ALIGN_UP(aMaxLength >> 1, pageSize);
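	// Worked example (assuming a 4KB page size): with aMaxLength == 0x41000,
	// aMaxLength >> 1 == 0x20800, which _ALIGN_UP rounds to 0x21000. Both the
	// offset and the maximum length then become 0x21000, i.e. the DL part of
	// the hybrid allocator is placed in the upper half of the chunk.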

	// Try to use Commit() to map aMinLength of physical memory for the heap, taking into account the
	// offset. If the operation fails, assume that the chunk is not a disconnected chunk and try to map
	// physical memory with Adjust(). In this case, we also can't use the hybrid allocator and have to
	// revert to Doug Lea only
	TBool useAdjust = EFalse;
	TInt r = aChunk.Commit(offset, aMinLength);
	if (r == KErrGeneral)
		{
		dlOnly = useAdjust = ETrue;
		r = aChunk.Adjust(aMinLength);
		if (r != KErrNone)
			return NULL;
		}
	else if (r == KErrNone)
		{
		// We have a disconnected chunk; reset aOffset and aMaxLength
		aOffset = offset;
		aMaxLength = maxLength;
		}
	else
		return NULL;

	// Parameters have been mostly verified and we know whether to use the hybrid allocator or Doug Lea only. The
	// constructor for the hybrid heap will automatically drop back to Doug Lea if it determines that aMinLength
	// == aMaxLength, so no need to worry about that requirement here. The user specified alignment is not used but
	// is passed in so that it can be sanity checked in case the user is doing something totally crazy with it
	RHybridHeap* h = new (aChunk.Base() + aOffset) RHybridHeap(aChunk.Handle(), aOffset, aMinLength, aMaxLength,
		aGrowBy, aAlign, aSingleThread, dlOnly, useAdjust);

	if (h->ConstructLock(aMode) != KErrNone)
		return NULL;

	// Return the heap address
	return h;
	}

#define UserTestDebugMaskBit(bit) (TBool)(UserSvr::DebugMask(bit>>5) & (1<<(bit&31)))
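// For example, UserTestDebugMaskBit(96) tests bit 0 (96 & 31) of debug mask
// word 3 (96 >> 5).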

_LIT(KLitDollarHeap,"$HEAP");
EXPORT_C TInt UserHeap::CreateThreadHeap(SStdEpocThreadCreateInfo& aInfo, RHeap*& aHeap, TInt aAlign, TBool aSingleThread)
/**
@internalComponent
*/
//
// Create a user-side heap
//
	{
	TInt page_size;
	GET_PAGE_SIZE(page_size);
	TInt minLength = _ALIGN_UP(aInfo.iHeapInitialSize, page_size);
	TInt maxLength = Max(aInfo.iHeapMaxSize, minLength);
	if (UserTestDebugMaskBit(96)) // 96 == KUSERHEAPTRACE in nk_trace.h
		aInfo.iFlags |= ETraceHeapAllocs;
	// Create the thread's heap chunk.
	RChunk c;
	TChunkCreateInfo createInfo;

	createInfo.SetThreadHeap(0, maxLength, KLitDollarHeap()); // Initialise with no memory committed.
#if USE_HYBRID_HEAP
	//
	// Create a disconnected chunk for the hybrid heap, with double the maximum length value
	//
	maxLength = 2*maxLength;
	createInfo.SetDisconnected(0, 0, maxLength);
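	// Note: UserHeap::OffsetChunkHeap() will later halve the maximum length
	// again and place the heap in the middle of the chunk, so the usable heap
	// size remains close to the requested aInfo.iHeapMaxSize (explanatory
	// note; see the halving logic in OffsetChunkHeap() above).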
#endif
	// Set the paging policy of the heap chunk based on the thread's paging policy.
	TUint pagingflags = aInfo.iFlags & EThreadCreateFlagPagingMask;
	switch (pagingflags)
		{
		case EThreadCreateFlagPaged:
			createInfo.SetPaging(TChunkCreateInfo::EPaged);
			break;
		case EThreadCreateFlagUnpaged:
			createInfo.SetPaging(TChunkCreateInfo::EUnpaged);
			break;
		case EThreadCreateFlagPagingUnspec:
			// Leave the chunk paging policy unspecified so the process's
			// paging policy is used.
			break;
		}

	TInt r = c.Create(createInfo);
	if (r != KErrNone)
		return r;

	aHeap = ChunkHeap(c, minLength, page_size, maxLength, aAlign, aSingleThread, EChunkHeapSwitchTo|EChunkHeapDuplicate);
	c.Close();

	if (!aHeap)
		return KErrNoMemory;

	if (aInfo.iFlags & ETraceHeapAllocs)
		{
		aHeap->iFlags |= RHeap::ETraceAllocs;
		BTraceContext8(BTrace::EHeap, BTrace::EHeapCreate, (TUint32)aHeap, RHybridHeap::EAllocCellSize);
		TInt chunkId = ((RHandleBase&)((RHybridHeap*)aHeap)->iChunkHandle).BTraceId();
		BTraceContext8(BTrace::EHeap, BTrace::EHeapChunkCreate, (TUint32)aHeap, chunkId);
		}
	if (aInfo.iFlags & EMonitorHeapMemory)
		aHeap->iFlags |= RHeap::EMonitorMemory;

	return KErrNone;
	}

#endif // __KERNEL_MODE__