// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\heap.cpp
//
//

#include "common.h"
#ifdef __KERNEL_MODE__
#include <kernel/kern_priv.h>
#endif

#ifdef _DEBUG
#define __SIMULATE_ALLOC_FAIL(s) if (CheckForSimulatedAllocFail()) {s}
#define __CHECK_CELL(p) CheckCell(p)
#define __ZAP_CELL(p) memset( ((TUint8*)p) + RHeap::EAllocCellSize, 0xde, p->len - RHeap::EAllocCellSize)
#define __DEBUG_SAVE(p) TInt dbgNestLevel = ((SDebugCell*)p)->nestingLevel
#define __DEBUG_RESTORE(p) ((SDebugCell*)(((TUint8*)p)-EAllocCellSize))->nestingLevel = dbgNestLevel
#else
#define __SIMULATE_ALLOC_FAIL(s)
#define __CHECK_CELL(p)
#define __ZAP_CELL(p)
#define __DEBUG_SAVE(p)
#define __DEBUG_RESTORE(p)
#endif

#define __NEXT_CELL(p) ((SCell*)(((TUint8*)p)+p->len))

#define __POWER_OF_2(x) ((TUint32)((x)^((x)-1))>=(TUint32)(x))
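// __POWER_OF_2 relies on the identity that when x is a power of two, x-1
// flips the single set bit and every bit below it, so x^(x-1) is an all-ones
// mask >= x; otherwise the XOR touches only the low bits and the result is
// smaller than x. Worked example (illustrative, not part of the original):
//   x = 8 (1000b): x-1 = 0111b, x^(x-1) = 1111b = 15 >= 8  -> true
//   x = 6 (0110b): x-1 = 0101b, x^(x-1) = 0011b =  3 <  6  -> false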
|
#define __MEMORY_MONITOR_CHECK_CELL(p) \
    { \
    TLinAddr m = TLinAddr(iAlign-1); \
    SCell* c = (SCell*)(((TUint8*)p)-EAllocCellSize); \
    if((c->len & m) || (c->len<iMinCell) || ((TUint8*)c<iBase) || ((TUint8*)__NEXT_CELL(c)>iTop)) \
        BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)p, (TUint32)c->len-EAllocCellSize); \
    }

/**
@SYMPatchable
@publishedPartner
@released

Defines the minimum cell size of a heap.

The constant can be changed at ROM build time using the patchdata OBY keyword.
*/
#ifdef __X86GCC__   // For X86GCC we don't use the proper data import attribute
#undef IMPORT_D     // since the constant is not really imported. GCC doesn't
#define IMPORT_D    // allow imports from self.
#endif
IMPORT_D extern const TInt KHeapMinCellSize;

/**
@SYMPatchable
@publishedPartner
@released

This constant defines the ratio that determines the amount of hysteresis
between heap growing and heap shrinking.
It is a 32-bit fixed point number where the radix point is defined to be
between bits 7 and 8 (where the LSB is bit 0), i.e. using standard notation,
a Q8 or fx24.8 fixed point number. For example, for a ratio of 2.0, set
KHeapShrinkHysRatio=0x200.

The heap shrinking hysteresis value is calculated to be:
@code
KHeapShrinkHysRatio*(iGrowBy>>8)
@endcode
where iGrowBy is a page-aligned value set by the argument, aGrowBy, to the
RHeap constructor.
The default hysteresis value is iGrowBy bytes i.e. KHeapShrinkHysRatio=2.0.

Memory usage may be improved by reducing the heap shrinking hysteresis
by setting 1.0 < KHeapShrinkHysRatio < 2.0. Heap shrinking hysteresis is
disabled/removed when KHeapShrinkHysRatio <= 1.0.

The constant can be changed at ROM build time using the patchdata OBY keyword.
*/
IMPORT_D extern const TInt KHeapShrinkHysRatio;
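// Worked example of the Q8 arithmetic above (illustrative values, not from
// this file): with iGrowBy = 0x1000 (4KB) and the default
// KHeapShrinkHysRatio = 0x200 (i.e. 2.0), the shrink threshold used by
// DoFree() is 0x200*(0x1000>>8) = 0x200*0x10 = 0x2000 bytes (8KB).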
|
#pragma warning( disable : 4705 )   // statement has no effect
UEXPORT_C RHeap::RHeap(TInt aMaxLength, TInt aAlign, TBool aSingleThread)
/**
@internalComponent
*/
//
// Constructor for fixed size heap
//
    : iMinLength(aMaxLength), iMaxLength(aMaxLength), iOffset(0), iGrowBy(0), iChunkHandle(0),
      iNestingLevel(0), iAllocCount(0), iFailType(ENone), iTestData(NULL)
    {
    iAlign = aAlign ? aAlign : ECellAlignment;
    iPageSize = 0;
    iFlags = aSingleThread ? (ESingleThreaded|EFixedSize) : EFixedSize;
    Initialise();
    }
#pragma warning( default : 4705 )


UEXPORT_C RHeap::RHeap(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread)
/**
@internalComponent
*/
//
// Constructor for chunk heaps.
//
    : iOffset(aOffset), iChunkHandle(aChunkHandle),
      iNestingLevel(0), iAllocCount(0), iFailType(ENone), iTestData(NULL)
    {
    TInt sz = iBase - ((TUint8*)this - iOffset);
    GET_PAGE_SIZE(iPageSize);
    __ASSERT_ALWAYS(iOffset>=0, HEAP_PANIC(ETHeapNewBadOffset));
    iMinLength = Max(aMinLength, sz + EAllocCellSize);
    iMinLength = _ALIGN_UP(iMinLength, iPageSize);
    iMaxLength = Max(aMaxLength, iMinLength);
    iMaxLength = _ALIGN_UP(iMaxLength, iPageSize);
    iGrowBy = _ALIGN_UP(aGrowBy, iPageSize);
    iFlags = aSingleThread ? ESingleThreaded : 0;
    iAlign = aAlign ? aAlign : ECellAlignment;
    Initialise();
    }



UEXPORT_C TAny* RHeap::operator new(TUint aSize, TAny* aBase) __NO_THROW
/**
@internalComponent
*/
    {
    __ASSERT_ALWAYS(aSize>=sizeof(RHeap), HEAP_PANIC(ETHeapNewBadSize));
    RHeap* h = (RHeap*)aBase;
    h->iAlign = 0x80000000; // garbage value
    h->iBase = ((TUint8*)aBase) + aSize;
    return aBase;
    }

void RHeap::Initialise()
//
// Initialise the heap.
//
    {
    __ASSERT_ALWAYS((TUint32)iAlign>=sizeof(TAny*) && __POWER_OF_2(iAlign), HEAP_PANIC(ETHeapNewBadAlignment));
    iCellCount = 0;
    iTotalAllocSize = 0;
    iBase = (TUint8*)Align(iBase + EAllocCellSize);
    iBase -= EAllocCellSize;
    TInt b = iBase - ((TUint8*)this - iOffset);
    TInt len = _ALIGN_DOWN(iMinLength - b, iAlign);
    iTop = iBase + len;
    iMinLength = iTop - ((TUint8*)this - iOffset);
    iMinCell = Align(KHeapMinCellSize + Max((TInt)EAllocCellSize, (TInt)EFreeCellSize));
#ifdef _DEBUG
    memset(iBase, 0xa5, len);
#endif
    SCell* pM = (SCell*)iBase;  // First free cell
    iFree.next = pM;            // Free list points to first free cell
    iFree.len = 0;              // Stop free from joining this with a free block
    pM->next = NULL;            // Terminate the free list
    pM->len = len;              // Set the size of the free cell
    }
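// After Initialise() the committed region holds the RHeap control block
// followed by a single free cell spanning the rest of the committed area,
// i.e. (a sketch of the layout implied by the code above):
//
//   (TUint8*)this - iOffset ... iBase                       iTop
//   [ RHeap header ........... ][ free cell: len = iTop-iBase ]
//
// iFree is a dummy list head whose len of 0 ensures it is never coalesced.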
|
#ifdef _DEBUG
void RHeap::CheckCell(const SCell* aCell) const
    {
    TLinAddr m = TLinAddr(iAlign - 1);

    __ASSERT_DEBUG(!(aCell->len & m), HEAP_PANIC(ETHeapBadCellAddress));
    __ASSERT_DEBUG(aCell->len >= iMinCell, HEAP_PANIC(ETHeapBadCellAddress));
    __ASSERT_DEBUG((TUint8*)aCell>=iBase, HEAP_PANIC(ETHeapBadCellAddress));
    __ASSERT_DEBUG((TUint8*)__NEXT_CELL(aCell)<=iTop, HEAP_PANIC(ETHeapBadCellAddress));
    }
#endif

UEXPORT_C RHeap::SCell* RHeap::GetAddress(const TAny* aCell) const
//
// As much as possible, check a cell address and backspace it
// to point at the cell header.
//
    {
    TLinAddr m = TLinAddr(iAlign - 1);
    __ASSERT_ALWAYS(!(TLinAddr(aCell)&m), HEAP_PANIC(ETHeapBadCellAddress));

    SCell* pC = (SCell*)(((TUint8*)aCell)-EAllocCellSize);
    __CHECK_CELL(pC);

    return pC;
    }



UEXPORT_C TInt RHeap::AllocLen(const TAny* aCell) const
/**
Gets the length of the available space in the specified allocated cell.

@param aCell A pointer to the allocated cell.

@return The length of the available space in the allocated cell.

@panic USER 42 if aCell does not point to a valid cell.
*/
    {
    SCell* pC = GetAddress(aCell);
    return pC->len - EAllocCellSize;
    }



#if !defined(__HEAP_MACHINE_CODED__) || defined(_DEBUG)
RHeap::SCell* RHeap::DoAlloc(TInt aSize, SCell*& aLastFree)
//
// Allocate without growing. aSize includes cell header and alignment.
// Lock already held.
//
    {
    SCell* pP = &iFree;
    SCell* pC = pP->next;
    for (; pC; pP=pC, pC=pC->next)  // Scan the free list
        {
        __CHECK_CELL(pC);
        SCell* pE;
        if (pC->len >= aSize)               // Block size bigger than request
            {
            if (pC->len - aSize < iMinCell) // Leftover must be large enough to hold an SCell
                {
                aSize = pC->len;            // It isn't, so take it all
                pE = pC->next;              // Set the next field
                }
            else
                {
                pE = (SCell*)(((TUint8*)pC)+aSize); // Take amount required
                pE->len = pC->len - aSize;  // Initialize new free cell
                pE->next = pC->next;
                }
            pP->next = pE;                  // Update previous pointer
            pC->len = aSize;                // Set control size word
#if defined(_DEBUG)
            ((SDebugCell*)pC)->nestingLevel = iNestingLevel;
            ((SDebugCell*)pC)->allocCount = ++iAllocCount;
#endif
            return pC;
            }
        }
    aLastFree = pP;
    return NULL;
    }
#endif
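// DoAlloc() is a first-fit allocator: it takes the first free cell large
// enough, splitting it only when the remainder could itself hold a minimum
// cell. Illustrative walk-through (assumed sizes, not from this file): with
// free cells of len 24 and 64 and iMinCell = 16, a request of aSize = 32
// skips the 24-byte cell, then splits the 64-byte cell into a 32-byte
// allocation and a 32-byte free remainder; a request of 56 would instead
// consume all 64 bytes, since the 8-byte leftover is below iMinCell.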
|
UEXPORT_C TAny* RHeap::Alloc(TInt aSize)
/**
Allocates a cell of the specified size from the heap.

If there is insufficient memory available on the heap from which to allocate
a cell of the required size, the function returns NULL.

The cell is aligned according to the alignment value specified at construction,
or the default alignment value, if an explicit value was not specified.

The resulting size of the allocated cell may be rounded up to a
value greater than aSize, but is guaranteed to be not less than aSize.

@param aSize The size of the cell to be allocated from the heap.

@return A pointer to the allocated cell. NULL if there is insufficient memory
        available.

@panic USER 47 if the maximum unsigned value of aSize is greater than or equal
       to the value of KMaxTInt/2; for example, calling Alloc(-1) raises
       this panic.

@see KMaxTInt
*/
    {
    __CHECK_THREAD_STATE;
    __ASSERT_ALWAYS((TUint)aSize<(KMaxTInt/2), HEAP_PANIC(ETHeapBadAllocatedCellSize));
    __SIMULATE_ALLOC_FAIL(return NULL;)

    TInt origSize = aSize;
    aSize = Max(Align(aSize + EAllocCellSize), iMinCell);
    SCell* pL = NULL;
    Lock();
    SCell* pC = (SCell*)DoAlloc(aSize, pL);
    if (!pC && !(iFlags & EFixedSize))
        {
        // try to grow chunk heap
        TInt r = TryToGrowHeap(aSize, pL);
        if (r==KErrNone)
            pC = DoAlloc(aSize, pL);
        }
    if (pC)
        ++iCellCount, iTotalAllocSize += (pC->len - EAllocCellSize);
    Unlock();
    if (pC)
        {
        TAny* result = ((TUint8*)pC) + EAllocCellSize;
        if (iFlags & ETraceAllocs)
            {
            TUint32 traceData[2];
            traceData[0] = AllocLen(result);
            traceData[1] = origSize;
            BTraceContextN(BTrace::EHeap, BTrace::EHeapAlloc, (TUint32)this, (TUint32)result, traceData, sizeof(traceData));
            }
#ifdef __KERNEL_MODE__
        memclr(result, pC->len - EAllocCellSize);
#endif
        return result;
        }
    if (iFlags & ETraceAllocs)
        BTraceContext8(BTrace::EHeap, BTrace::EHeapAllocFail, (TUint32)this, (TUint32)origSize);
    return NULL;
    }
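// Typical call pattern (a sketch, not from the original source):
//
//   TAny* p = heap->Alloc(20);  // may be rounded up - see AllocLen()
//   if (!p)
//       {
//       // handle failure: Alloc() returns NULL, it does not leave
//       }
//   // ... use p ...
//   heap->Free(p);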
|
TInt RHeap::TryToGrowHeap(TInt aSize, SCell* aLastFree)
    {
    TBool at_end = IsLastCell(aLastFree);
    TInt extra = at_end ? aSize - aLastFree->len : aSize;
    extra = (extra + iGrowBy - 1) / iGrowBy;
    extra *= iGrowBy;
    TInt cur_len = _ALIGN_UP(iTop - ((TUint8*)this - iOffset), iPageSize);
    TInt new_len = cur_len + extra;
    TInt r = KErrNoMemory;
    if (new_len <= iMaxLength)
        {
        r = SetBrk(new_len);
        if (r == KErrNone)
            {
            if (at_end)
                aLastFree->len += extra;
            else
                {
                SCell* pC = (SCell*)iTop;
                pC->len = extra;
                pC->next = NULL;
                aLastFree->next = pC;
                }
            iTop += extra;
            }
        }
    return r;
    }
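// The growth amount is rounded up to a whole number of iGrowBy increments.
// Worked example (assumed values): aSize = 5000 with iGrowBy = 4096 and no
// usable last free cell gives extra = ((5000+4095)/4096)*4096 = 8192, so the
// chunk is grown by two increments, provided new_len stays within iMaxLength.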
|
#ifndef __KERNEL_MODE__
EXPORT_C TInt RHeap::Compress()
/**
Compresses the heap.

The function frees excess committed space from the top
of the heap. The size of the heap is never reduced below the minimum size
specified during creation of the heap.

@return The space reclaimed. If no space can be reclaimed, then this value
        is zero.
*/
    {
    if (iFlags & EFixedSize)
        return 0;
    TInt r = 0;
    Lock();
    SCell* pC = &iFree;
    for (; pC->next; pC=pC->next) {}
    if (pC!=&iFree)
        {
        __CHECK_CELL(pC);
        if (IsLastCell(pC))
            r = Reduce(pC);
        }
    Unlock();
    return r;
    }
#endif



#if !defined(__HEAP_MACHINE_CODED__) || defined(_DEBUG)
void RHeap::DoFree(SCell* pC)
    {
    __ZAP_CELL(pC);

    SCell* pP = &iFree;
    SCell* pE = pP->next;
    for (; pE && pE<pC; pP=pE, pE=pE->next) {}
    if (pE)                         // Is there a following free cell?
        {
        SCell* pN = __NEXT_CELL(pC);
        __ASSERT_ALWAYS(pN<=pE, HEAP_PANIC(ETHeapFreeBadNextCell)); // Following cell overlaps
        if (pN==pE)                 // Is it adjacent
            {
            pC->len += pE->len;     // Yes - coalesce adjacent free cells
            pC->next = pE->next;
            }
        else                        // pN<pE, non-adjacent free cells
            pC->next = pE;          // Otherwise just point to it
        }
    else
        pC->next = NULL;            // No following free cell
    SCell* pN = __NEXT_CELL(pP);    // pN=pP=&iFree if no preceding free cell
    __ASSERT_ALWAYS(pN<=pC, HEAP_PANIC(ETHeapFreeBadPrevCell));     // Previous cell overlaps
    if (pN==pC)                     // Is it adjacent
        {
        pP->len += pC->len;         // Yes - coalesce adjacent free cells
        pP->next = pC->next;
        pC = pP;                    // for size reduction check
        }
    else                            // pN<pC, non-adjacent free cells
        pP->next = pC;              // point previous cell to the one being freed
    pN = __NEXT_CELL(pC);           // End of amalgamated free cell
    if ((TUint8*)pN==iTop && !(iFlags & EFixedSize) &&
        pC->len >= KHeapShrinkHysRatio*(iGrowBy>>8))
        Reduce(pC);
    }
#endif
|
UEXPORT_C void RHeap::Free(TAny* aCell)
/**
Frees the specified cell and returns it to the heap.

@param aCell A pointer to a valid cell; this pointer can also be NULL,
             in which case the function does nothing and just returns.

@panic USER 42 if aCell points to an invalid cell.
*/
    {
    __CHECK_THREAD_STATE;
    if (!aCell)
        return;
    Lock();
    if (iFlags & EMonitorMemory)
        __MEMORY_MONITOR_CHECK_CELL(aCell);
    SCell* pC = GetAddress(aCell);
    --iCellCount;
    iTotalAllocSize -= (pC->len - EAllocCellSize);
    DoFree(pC);
    if (iFlags & ETraceAllocs)
        BTraceContext8(BTrace::EHeap, BTrace::EHeapFree, (TUint32)this, (TUint32)aCell);
    Unlock();
    }



TInt RHeap::Reduce(SCell* aCell)
    {
    TInt reduce = 0;
    TInt offset = ((TUint8*)aCell)-((TUint8*)this - iOffset);
    if (offset>=iMinLength)
        reduce = aCell->len;                        // length of entire free cell
    else
        reduce = offset + aCell->len - iMinLength;  // length of free cell past minimum heap size
    reduce = _ALIGN_DOWN(reduce, iPageSize);        // round down to page multiple
    if (reduce<=0)
        return 0;                                   // can't reduce this heap
    TInt new_cell_len = aCell->len - reduce;        // length of last free cell after reduction
    if (new_cell_len == 0)
        {
        // the free cell can be entirely eliminated
        SCell* pP = &iFree;
        for (; pP->next!=aCell; pP=pP->next) {}
        pP->next = NULL;
        }
    else
        {
        if (new_cell_len < iMinCell)
            {
            // max reduction would leave a cell too small
            reduce -= iPageSize;
            new_cell_len += iPageSize;
            }
        aCell->len = new_cell_len;                  // reduce the cell length
        }
    iTop -= reduce;
    TInt new_len = _ALIGN_UP(iTop - ((TUint8*)this - iOffset), iPageSize);
    TInt r = SetBrk(new_len);
    __ASSERT_ALWAYS(r==KErrNone, HEAP_PANIC(ETHeapReduceFailed));
    return reduce;
    }



#ifndef __KERNEL_MODE__
EXPORT_C void RHeap::Reset()
/**
Frees all allocated cells on this heap.
*/
    {
    Lock();
    if (!(iFlags & EFixedSize))
        {
        TInt r = SetBrk(iMinLength);
        __ASSERT_ALWAYS(r==KErrNone, HEAP_PANIC(ETHeapResetFailed));
        }
    Initialise();
    Unlock();
    }
#endif
|
inline void RHeap::FindFollowingFreeCell(SCell* aCell, SCell*& aPrev, SCell*& aNext)
//
// Find the free cell that immediately follows aCell, if one exists.
// If found, aNext is set to point to it, else it is set to NULL.
// aPrev is set to the free cell before aCell, or the dummy free cell where there are no free cells before aCell.
// Called with the lock held.
//
    {
    aPrev = &iFree;
    aNext = aPrev->next;
    for (; aNext && aNext<aCell; aPrev=aNext, aNext=aNext->next) {}

    if (aNext)  // If there is a following free cell, check it's directly after aCell.
        {
        SCell* pNextCell = __NEXT_CELL(aCell);  // end of this cell
        __ASSERT_ALWAYS(pNextCell<=aNext, (Unlock(), HEAP_PANIC(ETHeapReAllocBadNextCell)));    // Following free cell overlaps
        if (pNextCell!=aNext)
            aNext = NULL;
        }
    }



TInt RHeap::TryToGrowCell(SCell* aCell, SCell* aPrev, SCell* aNext, TInt aSize)
//
// Try to grow the heap cell 'aCell' in place, to size 'aSize'.
// Requires the free cell immediately after aCell (aNext), and the free cell prior to
// that (aPrev), to be provided. (As found by FindFollowingFreeCell.)
//
    {
    TInt extra = aSize - aCell->len;
    if (aNext && (aNext->len>=extra))       // Is there a following free cell big enough?
        {
        if (aNext->len - extra >= iMinCell) // take part of free cell?
            {
            SCell* pX = (SCell*)((TUint8*)aNext + extra);   // remainder of free cell
            pX->next = aNext->next;         // remainder->next = original free cell->next
            pX->len = aNext->len - extra;   // remainder length = original free cell length - extra
            aPrev->next = pX;               // put remainder into free chain
            }
        else
            {
            extra = aNext->len;             // Take whole free cell
            aPrev->next = aNext->next;      // remove from free chain
            }
#ifdef __KERNEL_MODE__
        memclr(((TUint8*)aCell) + aCell->len, extra);
#endif
        aCell->len += extra;                // update reallocated cell length
        iTotalAllocSize += extra;
        return KErrNone;
        }
    return KErrGeneral;                     // No space to grow cell
    }
|
// UEXPORT_C TAny* RHeap::ReAlloc(TAny* aCell, TInt aSize, TInt aMode)
/**
Increases or decreases the size of an existing cell in the heap.

If the cell is being decreased in size, then it is guaranteed not to move,
and the function returns the pointer originally passed in aCell. Note that the
length of the cell will be the same if the difference between the old size
and the new size is smaller than the minimum cell size.

If the cell is being increased in size, i.e. aSize is bigger than its
current size, then the function tries to grow the cell in place.
If successful, then the function returns the pointer originally
passed in aCell. If unsuccessful, then:

1. if the cell cannot be moved, i.e. aMode has the ENeverMove bit set, then
   the function returns NULL.
2. if the cell can be moved, i.e. aMode does not have the ENeverMove bit set,
   then the function tries to allocate a new replacement cell, and, if
   successful, returns a pointer to the new cell; if unsuccessful, it
   returns NULL.

Note that in debug mode, the function returns NULL if the cell cannot be grown
in place, regardless of whether the ENeverMove bit is set.

If the reallocated cell is at a different location from the original cell, then
the content of the original cell is copied to the reallocated cell.

If the supplied pointer, aCell, is NULL, then the function attempts to allocate
a new cell, but only if the cell can be moved, i.e. aMode does not have
the ENeverMove bit set.

Note the following general points:

1. If reallocation fails, the content of the original cell is preserved.

2. The resulting size of the re-allocated cell may be rounded up to a value
   greater than aSize, but is guaranteed to be not less than aSize.

@param aCell A pointer to the cell to be reallocated. This may be NULL.

@param aSize The new size of the cell. This may be bigger or smaller than the
             size of the original cell.

@param aMode Flags controlling the reallocation. The only bit which has any
             effect on this function is that defined by the enumeration
             ENeverMove of the enum RAllocator::TReAllocMode.
             If this is set, then any successful reallocation guarantees not
             to have changed the start address of the cell.
             By default, this parameter is zero.

@return A pointer to the reallocated cell. This may be the same as the original
        pointer supplied through aCell. NULL if there is insufficient memory to
        reallocate the cell, or to grow it in place.

@panic USER 42, if aCell is not NULL, and does not point to a valid cell.
@panic USER 47, if the maximum unsigned value of aSize is greater
       than or equal to KMaxTInt/2. For example,
       calling ReAlloc(someptr,-1) raises this panic.

@see RAllocator::TReAllocMode
*/
UEXPORT_C TAny* RHeap::ReAlloc(TAny* aCell, TInt aSize, TInt aMode)
    {
    if (aCell && iFlags&EMonitorMemory)
        __MEMORY_MONITOR_CHECK_CELL(aCell);
    TAny* retval = ReAllocImpl(aCell, aSize, aMode);
    if (iFlags & ETraceAllocs)
        {
        if (retval)
            {
            TUint32 traceData[3];
            traceData[0] = AllocLen(retval);
            traceData[1] = aSize;
            traceData[2] = (TUint32)aCell;
            BTraceContextN(BTrace::EHeap, BTrace::EHeapReAlloc, (TUint32)this, (TUint32)retval, traceData, sizeof(traceData));
            }
        else
            BTraceContext12(BTrace::EHeap, BTrace::EHeapReAllocFail, (TUint32)this, (TUint32)aCell, (TUint32)aSize);
        }
    return retval;
    }

inline TAny* RHeap::ReAllocImpl(TAny* aCell, TInt aSize, TInt aMode)
    {
    __CHECK_THREAD_STATE;
    if (!aCell)
        return (aMode & ENeverMove) ? NULL : Alloc(aSize);
    __ASSERT_ALWAYS((TUint)aSize<(KMaxTInt/2), HEAP_PANIC(ETHeapBadAllocatedCellSize));
    Lock();
    SCell* pC = GetAddress(aCell);
    TInt old_len = pC->len;
    __DEBUG_SAVE(pC);
    aSize = Max(Align(aSize + EAllocCellSize), iMinCell);
    if (aSize > old_len)    // Trying to grow cell
        {
        __SIMULATE_ALLOC_FAIL({ Unlock(); return NULL; })

        // Try to grow cell in place, without reallocation
        SCell* pPrev;
        SCell* pNext;
        FindFollowingFreeCell(pC, pPrev, pNext);
        TInt r = TryToGrowCell(pC, pPrev, pNext, aSize);

        if (r==KErrNone)
            {
            Unlock();
            return aCell;
            }

        if (!(aMode & ENeverMove))
            // If moving is allowed, try re-alloc.
            // If we need to extend the heap, and the cell is at the end, try and grow in place.
            {
            SCell* pLastFree;
            SCell* pNewCell = (SCell*)DoAlloc(aSize, pLastFree);
            if (!pNewCell && !(iFlags & EFixedSize))
                // if we need to extend the heap to alloc
                {
                if (IsLastCell(pC) || (pNext && IsLastCell(pNext)))
                    // if last used cell, try and extend the heap and then the cell
                    {
                    TInt r = TryToGrowHeap(aSize - old_len, pLastFree);
                    if (r==KErrNone)
                        {
                        r = TryToGrowCell(pC, pPrev, pPrev->next, aSize);
                        Unlock();
                        __ASSERT_DEBUG(r == KErrNone, HEAP_PANIC(ETHeapCellDidntGrow));
                        return aCell;
                        }
                    }
                else
                    // try to grow the chunk heap and Alloc on it
                    {
                    TInt r = TryToGrowHeap(aSize, pLastFree);
                    if (r==KErrNone)
                        pNewCell = DoAlloc(aSize, pLastFree);
                    }
                }

            if (pNewCell)
                // if we created a new cell, adjust the tallies, copy the contents and delete the old cell.
                {
                iCellCount++;
                iTotalAllocSize += (pNewCell->len - EAllocCellSize);

                Unlock();
                TUint8* raw = ((TUint8*)pNewCell);

                memcpy(raw + EAllocCellSize, aCell, old_len - EAllocCellSize);
#ifdef __KERNEL_MODE__
                memclr(raw + old_len, pNewCell->len - old_len);
#endif
                Free(aCell);
                __DEBUG_RESTORE(raw + EAllocCellSize);
                return raw + EAllocCellSize;
                }
            }
        else
            // No moving, but still possible to extend the heap (if the heap is extendable)
            {
            if (!(iFlags & EFixedSize) && (IsLastCell(pC) || (pNext && IsLastCell(pNext))))
                {
                SCell* pLastFree = pNext ? pNext : pPrev;
                TInt r = TryToGrowHeap(aSize - old_len, pLastFree);
                if (r==KErrNone)
                    {
                    r = TryToGrowCell(pC, pPrev, pPrev->next, aSize);
                    Unlock();
                    __ASSERT_DEBUG(r==KErrNone, HEAP_PANIC(ETHeapCellDidntGrow));
                    return aCell;
                    }
                }
            }
        Unlock();
        return NULL;
        }
    if (old_len - aSize >= iMinCell)
        {
        // cell shrinking, remainder big enough to form a new free cell
        SCell* pX = (SCell*)((TUint8*)pC + aSize);  // pointer to new free cell
        pC->len = aSize;            // update cell size
        pX->len = old_len - aSize;  // size of remainder
        iTotalAllocSize -= pX->len;
        DoFree(pX);                 // link new free cell into chain, shrink heap if necessary
        }
    Unlock();
    return aCell;
    }
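// Typical call pattern (a sketch, not from the original source):
//
//   TAny* q = heap->ReAlloc(p, newSize);  // aMode defaults to 0, so may move
//   if (q)
//       p = q;   // if the cell moved, the old cell has already been freed
//   else
//       {
//       // p is still valid and its contents are preserved
//       }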
|
#ifndef __KERNEL_MODE__

EXPORT_C TInt RHeap::Available(TInt& aBiggestBlock) const
/**
Gets the total free space currently available on the heap and the space
available in the largest free block.

The space available represents the total space which can be allocated.

Note that compressing the heap may reduce the total free space available and
the space available in the largest free block.

@param aBiggestBlock On return, contains the space available
                     in the largest free block on the heap.

@return The total free space currently available on the heap.
*/
    {
    TInt total = 0;
    TInt max = 0;
    Lock();
    SCell* pC = iFree.next;
    for (; pC; pC=pC->next)
        {
        TInt l = pC->len - EAllocCellSize;
        if (l > max)
            max = l;
        total += l;
        }
    Unlock();
    aBiggestBlock = max;
    return total;
    }



EXPORT_C TInt RHeap::AllocSize(TInt& aTotalAllocSize) const
/**
Gets the number of cells allocated on this heap, and the total space
allocated to them.

@param aTotalAllocSize On return, contains the total space allocated
                       to the cells.

@return The number of cells allocated on this heap.
*/
    {
    Lock();
    TInt c = iCellCount;
    aTotalAllocSize = iTotalAllocSize;
    Unlock();
    return c;
    }



EXPORT_C RHeap* UserHeap::FixedHeap(TAny* aBase, TInt aMaxLength, TInt aAlign, TBool aSingleThread)
/**
Creates a fixed length heap at a specified location.

On successful return from this function, aMaxLength bytes are committed by the chunk.
The heap cannot be extended.

@param aBase          A pointer to the location where the heap is to be constructed.
@param aMaxLength     The length of the heap. If the supplied value is less
                      than KMinHeapSize, it is discarded and the value KMinHeapSize
                      is used instead.
@param aAlign         The alignment of heap cells.
@param aSingleThread  Indicates whether single threaded or not.

@return A pointer to the new heap, or NULL if the heap could not be created.

@panic USER 56 if aMaxLength is negative.
@panic USER 172 if aAlign is not a power of 2 or is less than the size of a TAny*.
*/
//
// Force construction of the fixed memory.
//
    {
    __ASSERT_ALWAYS(aMaxLength>=0, ::Panic(ETHeapMaxLengthNegative));
    if (aMaxLength<KMinHeapSize)
        aMaxLength = KMinHeapSize;
    RHeap* h = new(aBase) RHeap(aMaxLength, aAlign, aSingleThread);
    if (!aSingleThread)
        {
        TInt r = h->iLock.CreateLocal();
        if (r!=KErrNone)
            return NULL;
        h->iHandles = (TInt*)&h->iLock;
        h->iHandleCount = 1;
        }
    return h;
    }


/**
Constructor where the minimum and maximum lengths of the heap can be defined.
It defaults the chunk heap to be created to use a new local chunk,
to have a grow-by value of KMinHeapGrowBy, to be unaligned, not to be
single threaded and not to have any mode flags set.

@param aMinLength The minimum length of the heap to be created.
@param aMaxLength The maximum length to which the heap to be created can grow.
                  If the supplied value is less than KMinHeapSize, then it
                  is discarded and the value KMinHeapSize used instead.
*/
EXPORT_C TChunkHeapCreateInfo::TChunkHeapCreateInfo(TInt aMinLength, TInt aMaxLength) :
    iVersionNumber(EVersion0), iMinLength(aMinLength), iMaxLength(aMaxLength),
    iAlign(0), iGrowBy(1), iSingleThread(EFalse),
    iOffset(0), iPaging(EUnspecified), iMode(0), iName(NULL)
    {
    }


/**
Sets the chunk heap to create a new chunk with the specified name.

This overrides any previous call to TChunkHeapCreateInfo::SetNewChunkHeap() or
TChunkHeapCreateInfo::SetExistingChunkHeap() for this TChunkHeapCreateInfo object.

@param aName The name to be given to the chunk heap to be created.
             If NULL, the function constructs a local chunk to host the heap.
             If not NULL, a pointer to a descriptor containing the name to be
             assigned to the global chunk hosting the heap.
*/
EXPORT_C void TChunkHeapCreateInfo::SetCreateChunk(const TDesC* aName)
    {
    iName = (TDesC*)aName;
    iChunk.SetHandle(KNullHandle);
    }


/**
Sets the chunk heap to be created to use the chunk specified.

This overrides any previous call to TChunkHeapCreateInfo::SetNewChunkHeap() or
TChunkHeapCreateInfo::SetExistingChunkHeap() for this TChunkHeapCreateInfo object.

@param aChunk A handle to the chunk to use for the heap.
*/
EXPORT_C void TChunkHeapCreateInfo::SetUseChunk(const RChunk aChunk)
    {
    iName = NULL;
    iChunk = aChunk;
    }


/**
Creates a chunk heap of the type specified by the parameter aCreateInfo.

@param aCreateInfo A reference to a TChunkHeapCreateInfo object specifying the
                   type of chunk heap to create.

@return A pointer to the new heap or NULL if the heap could not be created.

@panic USER 41 if the heap's specified minimum length is greater than the specified maximum length.
@panic USER 55 if the heap's specified minimum length is negative.
@panic USER 172 if the heap's specified alignment is not a power of 2 or is less than the size of a TAny*.
*/
EXPORT_C RHeap* UserHeap::ChunkHeap(const TChunkHeapCreateInfo& aCreateInfo)
    {
    // aCreateInfo must have been configured to use a new chunk or an existing chunk.
    __ASSERT_ALWAYS(!(aCreateInfo.iMode & (TUint32)~EChunkHeapMask), ::Panic(EHeapCreateInvalidMode));
    RHeap* h = NULL;

    if (aCreateInfo.iChunk.Handle() == KNullHandle)
        {// A new chunk is to be created for this heap.
        __ASSERT_ALWAYS(aCreateInfo.iMinLength >= 0, ::Panic(ETHeapMinLengthNegative));
        __ASSERT_ALWAYS(aCreateInfo.iMaxLength >= aCreateInfo.iMinLength, ::Panic(ETHeapCreateMaxLessThanMin));

        TInt maxLength = aCreateInfo.iMaxLength;
        if (maxLength < KMinHeapSize)
            maxLength = KMinHeapSize;

        TChunkCreateInfo chunkInfo;
        chunkInfo.SetNormal(0, maxLength);
        chunkInfo.SetOwner((aCreateInfo.iSingleThread)? EOwnerThread : EOwnerProcess);
        if (aCreateInfo.iName)
            chunkInfo.SetGlobal(*aCreateInfo.iName);
        // Set the paging attributes of the chunk.
        if (aCreateInfo.iPaging == TChunkHeapCreateInfo::EPaged)
            chunkInfo.SetPaging(TChunkCreateInfo::EPaged);
        if (aCreateInfo.iPaging == TChunkHeapCreateInfo::EUnpaged)
            chunkInfo.SetPaging(TChunkCreateInfo::EUnpaged);
        // Create the chunk.
        RChunk chunk;
        if (chunk.Create(chunkInfo) != KErrNone)
            return NULL;
        // Create the heap using the new chunk.
        TUint mode = aCreateInfo.iMode | EChunkHeapDuplicate;   // Must duplicate the handle.
        h = OffsetChunkHeap(chunk, aCreateInfo.iMinLength, aCreateInfo.iOffset,
                            aCreateInfo.iGrowBy, maxLength, aCreateInfo.iAlign,
                            aCreateInfo.iSingleThread, mode);
        chunk.Close();
        }
    else
        {
        h = OffsetChunkHeap(aCreateInfo.iChunk, aCreateInfo.iMinLength, aCreateInfo.iOffset,
                            aCreateInfo.iGrowBy, aCreateInfo.iMaxLength, aCreateInfo.iAlign,
                            aCreateInfo.iSingleThread, aCreateInfo.iMode);
        }
    return h;
    }
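// Typical usage (a sketch with assumed sizes, not from the original source):
//
//   TChunkHeapCreateInfo info(0x1000, 0x100000); // min 4KB, max 1MB
//   info.SetCreateChunk(NULL);                   // host in a new local chunk
//   RHeap* heap = UserHeap::ChunkHeap(info);
//   if (heap)
//       heap->Close();   // assuming RAllocator::Close() is used to release it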
|
EXPORT_C RHeap* UserHeap::ChunkHeap(const TDesC* aName, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread)
/**
Creates a heap in a local or global chunk.

The chunk hosting the heap can be local or global.

A local chunk is one which is private to the process creating it and is not
intended for access by other user processes.
A global chunk is one which is visible to all processes.

The hosting chunk is local, if the pointer aName is NULL, otherwise
the hosting chunk is global and the descriptor *aName is assumed to contain
the name to be assigned to it.

Ownership of the host chunk is vested in the current process.

A minimum and a maximum size for the heap can be specified. On successful
return from this function, the size of the heap is at least aMinLength.
If subsequent requests for allocation of memory from the heap cannot be
satisfied by compressing the heap, the size of the heap is extended in
increments of aGrowBy until the request can be satisfied. Attempts to extend
the heap cause the size of the host chunk to be adjusted.

Note that the size of the heap cannot be adjusted by more than aMaxLength.

@param aName          If NULL, the function constructs a local chunk to host
                      the heap.
                      If not NULL, a pointer to a descriptor containing the name
                      to be assigned to the global chunk hosting the heap.
@param aMinLength     The minimum length of the heap.
@param aMaxLength     The maximum length to which the heap can grow.
                      If the supplied value is less than KMinHeapSize, then it
                      is discarded and the value KMinHeapSize used instead.
@param aGrowBy        The increments to the size of the host chunk. If a value is
                      not explicitly specified, the value KMinHeapGrowBy is taken
                      by default.
@param aAlign         The alignment of heap cells.
@param aSingleThread  Indicates whether single threaded or not.

@return A pointer to the new heap or NULL if the heap could not be created.

@panic USER 41 if aMinLength is greater than the supplied value of aMaxLength.
@panic USER 55 if aMinLength is negative.
@panic USER 172 if aAlign is not a power of 2 or is less than the size of a TAny*.
*/
//
// Allocate a Chunk of the requested size and force construction.
//
    {
    TChunkHeapCreateInfo createInfo(aMinLength, aMaxLength);
    createInfo.SetCreateChunk(aName);
    createInfo.SetGrowBy(aGrowBy);
    createInfo.SetAlignment(aAlign);
    createInfo.SetSingleThread(aSingleThread);
    return ChunkHeap(createInfo);
    }



EXPORT_C RHeap* UserHeap::ChunkHeap(RChunk aChunk, TInt aMinLength, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
/**
Creates a heap in an existing chunk.

This function is intended to be used to create a heap in a user writable code
chunk as created by a call to RChunk::CreateLocalCode().
This type of heap can be used to hold code fragments from a JIT compiler.

The maximum length to which the heap can grow is the same as
the maximum size of the chunk.

@param aChunk         The chunk that will host the heap.
@param aMinLength     The minimum length of the heap.
@param aGrowBy        The increments to the size of the host chunk.
@param aMaxLength     The maximum length to which the heap can grow.
@param aAlign         The alignment of heap cells.
@param aSingleThread  Indicates whether single threaded or not.
@param aMode          Flags controlling the heap creation. This should be set
                      from one or more of the values in TChunkHeapCreateMode.

@return A pointer to the new heap or NULL if the heap could not be created.

@panic USER 172 if aAlign is not a power of 2 or is less than the size of a TAny*.
*/
//
// Construct a heap in an already existing chunk
//
    {
    return OffsetChunkHeap(aChunk, aMinLength, 0, aGrowBy, aMaxLength, aAlign, aSingleThread, aMode);
    }



EXPORT_C RHeap* UserHeap::OffsetChunkHeap(RChunk aChunk, TInt aMinLength, TInt aOffset, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
/**
Creates a heap in an existing chunk, offset from the beginning of the chunk.

This function is intended to be used to create a heap where a fixed amount of
additional data must be stored at a known location. The additional data can be
placed at the base address of the chunk, allowing it to be located without
depending on the internals of the heap structure.

The maximum length to which the heap can grow is the maximum size of the chunk,
minus the offset.

@param aChunk         The chunk that will host the heap.
@param aMinLength     The minimum length of the heap.
@param aOffset        The offset from the start of the chunk to the start of the heap.
@param aGrowBy        The increments to the size of the host chunk.
@param aMaxLength     The maximum length to which the heap can grow.
@param aAlign         The alignment of heap cells.
@param aSingleThread  Indicates whether single threaded or not.
@param aMode          Flags controlling the heap creation. This should be set
                      from one or more of the values in TChunkHeapCreateMode.

@return A pointer to the new heap or NULL if the heap could not be created.

@panic USER 172 if aAlign is not a power of 2 or is less than the size of a TAny*.
*/
//
// Construct a heap in an already existing chunk
//
    {
    TInt page_size;
    UserHal::PageSizeInBytes(page_size);
    if (!aAlign)
        aAlign = RHeap::ECellAlignment;
    TInt maxLength = aChunk.MaxSize();
    TInt round_up = Max(aAlign, page_size);
    TInt min_cell = _ALIGN_UP(Max((TInt)RHeap::EAllocCellSize, (TInt)RHeap::EFreeCellSize), aAlign);
    aOffset = _ALIGN_UP(aOffset, 8);
    if (aMaxLength && aMaxLength+aOffset<maxLength)
        maxLength = _ALIGN_UP(aMaxLength+aOffset, round_up);
    __ASSERT_ALWAYS(aMinLength>=0, ::Panic(ETHeapMinLengthNegative));
    __ASSERT_ALWAYS(maxLength>=aMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
    aMinLength = _ALIGN_UP(Max(aMinLength, (TInt)sizeof(RHeap) + min_cell) + aOffset, round_up);
    TInt r = aChunk.Adjust(aMinLength);
    if (r!=KErrNone)
        return NULL;

    RHeap* h = new (aChunk.Base() + aOffset) RHeap(aChunk.Handle(), aOffset, aMinLength, maxLength, aGrowBy, aAlign, aSingleThread);

    TBool duplicateLock = EFalse;
    if (!aSingleThread)
        {
        duplicateLock = aMode & EChunkHeapSwitchTo;
        if (h->iLock.CreateLocal(duplicateLock ? EOwnerThread : EOwnerProcess)!=KErrNone)
            {
            h->iChunkHandle = 0;
            return NULL;
            }
        }

    if (aMode & EChunkHeapSwitchTo)
        User::SwitchHeap(h);

    h->iHandles = &h->iChunkHandle;
    if (!aSingleThread)
        {
        // now change the thread-relative chunk/semaphore handles into process-relative handles
        h->iHandleCount = 2;
        if (duplicateLock)
            {
            RHandleBase s = h->iLock;
            r = h->iLock.Duplicate(RThread());
            s.Close();
            }
        if (r==KErrNone && (aMode & EChunkHeapDuplicate))
            {
            r = ((RChunk*)&h->iChunkHandle)->Duplicate(RThread());
            if (r!=KErrNone)
                h->iLock.Close(), h->iChunkHandle = 0;
            }
        }
    else
        {
        h->iHandleCount = 1;
        if (aMode & EChunkHeapDuplicate)
            r = ((RChunk*)&h->iChunkHandle)->Duplicate(RThread(), EOwnerThread);
        }

    // return the heap address
    return (r==KErrNone) ? h : NULL;
    }
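// Resulting chunk layout (a sketch of what the code above produces): the
// first aOffset bytes are left for the caller's own data, and the heap
// control block plus cells follow:
//
//   chunk.Base()            chunk.Base()+aOffset
//   [ caller data ......... ][ RHeap header | heap cells ... ]
//
// Note that aOffset is rounded up to a multiple of 8 before use, as above.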
|
#define UserTestDebugMaskBit(bit) (TBool)(UserSvr::DebugMask(bit>>5) & (1<<(bit&31)))
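// UserTestDebugMaskBit() selects debug-mask word bit>>5 and tests bit bit&31
// within it; e.g. bit 96 maps to word 96>>5 = 3, bit 96&31 = 0, matching the
// KUSERHEAPTRACE usage in CreateThreadHeap() below.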
|
_LIT(KLitDollarHeap,"$HEAP");
EXPORT_C TInt UserHeap::CreateThreadHeap(SStdEpocThreadCreateInfo& aInfo, RHeap*& aHeap, TInt aAlign, TBool aSingleThread)
/**
@internalComponent
*/
//
// Create a user-side heap
//
    {
    TInt page_size;
    UserHal::PageSizeInBytes(page_size);
    TInt minLength = _ALIGN_UP(aInfo.iHeapInitialSize, page_size);
    TInt maxLength = Max(aInfo.iHeapMaxSize, minLength);
    if (UserTestDebugMaskBit(96))   // 96 == KUSERHEAPTRACE in nk_trace.h
        aInfo.iFlags |= ETraceHeapAllocs;

    // Create the thread's heap chunk.
    RChunk c;
    TChunkCreateInfo createInfo;
    createInfo.SetThreadHeap(0, maxLength, KLitDollarHeap());   // Initialise with no memory committed.

    // Set the paging policy of the heap chunk based on the thread's paging policy.
    TUint pagingflags = aInfo.iFlags & EThreadCreateFlagPagingMask;
    switch (pagingflags)
        {
        case EThreadCreateFlagPaged:
            createInfo.SetPaging(TChunkCreateInfo::EPaged);
            break;
        case EThreadCreateFlagUnpaged:
            createInfo.SetPaging(TChunkCreateInfo::EUnpaged);
            break;
        case EThreadCreateFlagPagingUnspec:
            // Leave the chunk paging policy unspecified so the process's
            // paging policy is used.
            break;
        }

    TInt r = c.Create(createInfo);
    if (r!=KErrNone)
        return r;

    aHeap = ChunkHeap(c, minLength, page_size, maxLength, aAlign, aSingleThread, EChunkHeapSwitchTo|EChunkHeapDuplicate);
    c.Close();
    if (!aHeap)
        return KErrNoMemory;
    if (aInfo.iFlags & ETraceHeapAllocs)
        {
        aHeap->iFlags |= RHeap::ETraceAllocs;
        BTraceContext8(BTrace::EHeap, BTrace::EHeapCreate, (TUint32)aHeap, RHeap::EAllocCellSize);
        TInt handle = aHeap->ChunkHandle();
        TInt chunkId = ((RHandleBase&)handle).BTraceId();
        BTraceContext8(BTrace::EHeap, BTrace::EHeapChunkCreate, (TUint32)aHeap, chunkId);
        }
    if (aInfo.iFlags & EMonitorHeapMemory)
        aHeap->iFlags |= RHeap::EMonitorMemory;
    return KErrNone;
    }

#endif  // __KERNEL_MODE__
|
|
1246 |
void RHeap::WalkCheckCell(TAny* aPtr, TCellType aType, TAny* aCell, TInt aLen)
|
|
1247 |
{
|
|
1248 |
(void)aCell;
|
|
1249 |
SHeapCellInfo& info = *(SHeapCellInfo*)aPtr;
|
|
1250 |
switch(aType)
|
|
1251 |
{
|
|
1252 |
case EGoodAllocatedCell:
|
|
1253 |
{
|
|
1254 |
++info.iTotalAlloc;
|
|
1255 |
info.iTotalAllocSize += (aLen-EAllocCellSize);
|
|
1256 |
#if defined(_DEBUG)
|
|
1257 |
RHeap& h = *info.iHeap;
|
|
1258 |
if ( ((SDebugCell*)aCell)->nestingLevel == h.iNestingLevel )
|
|
1259 |
{
|
|
1260 |
if (++info.iLevelAlloc==1)
|
|
1261 |
info.iStranded = (SDebugCell*)aCell;
|
|
1262 |
#ifdef __KERNEL_MODE__
|
|
1263 |
if (KDebugNum(KSERVER) || KDebugNum(KTESTFAST))
|
|
1264 |
{
|
|
1265 |
// __KTRACE_OPT(KSERVER,Kern::Printf("LEAKED KERNEL HEAP CELL @ %08x : len=%d", aCell, aLen));
|
|
1266 |
Kern::Printf("LEAKED KERNEL HEAP CELL @ %08x : len=%d", aCell, aLen);
|
|
1267 |
TLinAddr base = ((TLinAddr)aCell)&~0x0f;
|
|
1268 |
TLinAddr end = ((TLinAddr)aCell)+(TLinAddr)aLen;
|
|
1269 |
while(base<end)
|
|
1270 |
{
|
|
1271 |
const TUint32* p = (const TUint32*)base;
|
|
1272 |
Kern::Printf("%08x: %08x %08x %08x %08x", p, p[0], p[1], p[2], p[3]);
|
|
1273 |
base += 16;
|
|
1274 |
}
|
|
1275 |
}
|
|
1276 |
#endif
|
|
1277 |
}
|
|
1278 |
#endif
|
|
1279 |
break;
|
|
1280 |
}
|
|
1281 |
case EGoodFreeCell:
|
|
1282 |
++info.iTotalFree;
|
|
1283 |
break;
|
|
1284 |
case EBadAllocatedCellSize:
|
|
1285 |
HEAP_PANIC(ETHeapBadAllocatedCellSize);
|
|
1286 |
case EBadAllocatedCellAddress:
|
|
1287 |
HEAP_PANIC(ETHeapBadAllocatedCellAddress);
|
|
1288 |
case EBadFreeCellAddress:
|
|
1289 |
HEAP_PANIC(ETHeapBadFreeCellAddress);
|
|
1290 |
case EBadFreeCellSize:
|
|
1291 |
HEAP_PANIC(ETHeapBadFreeCellSize);
|
|
1292 |
default:
|
|
1293 |
HEAP_PANIC(ETHeapWalkBadCellType);
|
|
1294 |
}
|
|
1295 |
}
|
|
1296 |
|
|
1297 |
TInt RHeap::DoCountAllocFree(TInt& aFree)
|
|
1298 |
{
|
|
1299 |
SHeapCellInfo info;
|
|
1300 |
memclr(&info, sizeof(info));
|
|
1301 |
info.iHeap = this;
|
|
1302 |
Walk(&WalkCheckCell, &info);
|
|
1303 |
aFree = info.iTotalFree;
|
|
1304 |
return info.iTotalAlloc;
|
|
1305 |
}
|
|
1306 |
|
|
1307 |
|
|
1308 |
UEXPORT_C TInt RHeap::DebugFunction(TInt aFunc, TAny* a1, TAny* a2)
|
|
1309 |
/**
|
|
1310 |
@internalComponent
|
|
1311 |
*/
|
|
1312 |
{
|
|
1313 |
TInt r = KErrNone;
|
|
1314 |
switch(aFunc)
|
|
1315 |
{
|
|
1316 |
case RAllocator::ECount:
|
|
1317 |
r = DoCountAllocFree(*(TInt*)a1);
|
|
1318 |
break;
|
|
1319 |
case RAllocator::EMarkStart:
|
|
1320 |
__DEBUG_ONLY(DoMarkStart());
|
|
1321 |
break;
|
|
1322 |
case RAllocator::EMarkEnd:
|
|
1323 |
__DEBUG_ONLY( r = DoMarkEnd((TInt)a1) );
|
|
1324 |
break;
|
|
1325 |
case RAllocator::ECheck:
|
|
1326 |
r = DoCheckHeap((SCheckInfo*)a1);
|
|
1327 |
break;
|
|
1328 |
case RAllocator::ESetFail:
|
|
1329 |
__DEBUG_ONLY(DoSetAllocFail((TAllocFail)(TInt)a1, (TInt)a2));
|
|
1330 |
break;
|
|
1331 |
case RAllocator::ESetBurstFail:
|
|
1332 |
#if _DEBUG
|
|
1333 |
{
|
|
1334 |
SRAllocatorBurstFail* fail = (SRAllocatorBurstFail*) a2;
|
|
1335 |
DoSetAllocFail((TAllocFail)(TInt)a1, fail->iRate, fail->iBurst);
|
|
1336 |
}
|
|
1337 |
#endif
|
|
1338 |
break;
|
|
1339 |
|
|
1340 |
case RAllocator::ECheckFailure:
|
|
1341 |
// iRand will be incremented for each EFailNext, EBurstFailNext,
|
|
1342 |
// EDeterministic and EBurstDeterministic failure.
|
|
1343 |
r = iRand;
|
|
1344 |
break;
|
|
1345 |
|
|
1346 |
case RAllocator::ECopyDebugInfo:
|
|
1347 |
{
|
|
1348 |
TInt nestingLevel = ((SDebugCell*)a1)[-1].nestingLevel;
|
|
1349 |
((SDebugCell*)a2)[-1].nestingLevel = nestingLevel;
|
|
1350 |
break;
|
|
1351 |
}
|
|
1352 |
case RHeap::EWalk:
|
|
1353 |
Walk((TWalkFunc)a1, a2);
|
|
1354 |
break;
|
|
1355 |
default:
|
|
1356 |
return KErrNotSupported;
|
|
1357 |
}
|
|
1358 |
return r;
|
|
1359 |
}
|
|
1360 |
|
|
1361 |
|
|
1362 |
|
|
1363 |
|
|
1364 |
void RHeap::Walk(TWalkFunc aFunc, TAny* aPtr)
|
|
1365 |
//
|
|
1366 |
// Walk the heap calling the info function.
|
|
1367 |
//
|
|
1368 |
{
|
|
1369 |
|
|
1370 |
Lock();
|
|
1371 |
SCell* pC = (SCell*)iBase; // allocated cells
|
|
1372 |
SCell* pF = &iFree; // free cells
|
|
1373 |
FOREVER
|
|
1374 |
{
|
|
1375 |
pF = pF->next; // next free cell
|
|
1376 |
if (!pF)
|
|
1377 |
pF = (SCell*)iTop; // to make size checking work
|
|
1378 |
else if ( (TUint8*)pF>=iTop || (pF->next && pF->next<=pF) )
|
|
1379 |
{
|
|
1380 |
if (iFlags & ETraceAllocs)
|
|
1381 |
BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)pF+EFreeCellSize, 0);
|
|
1382 |
// free cell pointer off the end or going backwards
|
|
1383 |
Unlock();
|
|
1384 |
(*aFunc)(aPtr, EBadFreeCellAddress, pF, 0);
|
|
1385 |
return;
|
|
1386 |
}
|
|
1387 |
else
|
|
1388 |
{
|
|
1389 |
TInt l = pF->len;
|
|
1390 |
if (l<iMinCell || (l & (iAlign-1)))
|
|
1391 |
{
|
|
1392 |
if (iFlags & ETraceAllocs)
|
|
1393 |
BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)pF+EFreeCellSize, l-EFreeCellSize);
|
|
1394 |
// free cell length invalid
|
|
1395 |
Unlock();
|
|
1396 |
(*aFunc)(aPtr, EBadFreeCellSize, pF, l);
|
|
1397 |
return;
|
|
1398 |
}
|
|
1399 |
}
|
|
1400 |
while (pC!=pF) // walk allocated cells up to next free cell
|
|
1401 |
{
|
|
1402 |
TInt l = pC->len;
|
|
1403 |
if (l<iMinCell || (l & (iAlign-1)))
|
|
1404 |
{
|
|
1405 |
if (iFlags & ETraceAllocs)
|
|
1406 |
BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)pC+EAllocCellSize, l-EAllocCellSize);
|
|
1407 |
// allocated cell length invalid
|
|
1408 |
Unlock();
|
|
1409 |
(*aFunc)(aPtr, EBadAllocatedCellSize, pC, l);
|
|
1410 |
return;
|
|
1411 |
}
|
|
1412 |
(*aFunc)(aPtr, EGoodAllocatedCell, pC, l);
|
|
1413 |
SCell* pN = __NEXT_CELL(pC);
|
|
1414 |
if (pN > pF)
|
|
1415 |
{
|
|
1416 |
if (iFlags & ETraceAllocs)
|
|
1417 |
BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)pC+EAllocCellSize, l-EAllocCellSize);
|
|
1418 |
// cell overlaps next free cell
|
|
1419 |
Unlock();
|
|
1420 |
(*aFunc)(aPtr, EBadAllocatedCellAddress, pC, l);
|
|
1421 |
return;
|
|
1422 |
}
|
|
1423 |
pC = pN;
|
|
1424 |
}
|
|
1425 |
if ((TUint8*)pF == iTop)
|
|
1426 |
break; // reached end of heap
|
|
1427 |
pC = __NEXT_CELL(pF); // step to next allocated cell
|
|
1428 |
(*aFunc)(aPtr, EGoodFreeCell, pF, pF->len);
|
|
1429 |
}
|
|
1430 |
Unlock();
|
|
1431 |
}
|
|
1432 |
|
|
1433 |
TInt RHeap::DoCheckHeap(SCheckInfo* aInfo)
|
|
1434 |
{
|
|
1435 |
(void)aInfo;
|
|
1436 |
SHeapCellInfo info;
|
|
1437 |
memclr(&info, sizeof(info));
|
|
1438 |
info.iHeap = this;
|
|
1439 |
Walk(&WalkCheckCell, &info);
|
|
1440 |
#if defined(_DEBUG)
|
|
1441 |
if (!aInfo)
|
|
1442 |
return KErrNone;
|
|
1443 |
TInt expected = aInfo->iCount;
|
|
1444 |
TInt actual = aInfo->iAll ? info.iTotalAlloc : info.iLevelAlloc;
|
|
1445 |
if (actual!=expected && !iTestData)
|
|
1446 |
{
|
|
1447 |
#ifdef __KERNEL_MODE__
|
|
1448 |
Kern::Fault("KERN-ALLOC COUNT", (expected<<16)|actual );
|
|
1449 |
#else
|
|
1450 |
User::Panic(_L("ALLOC COUNT"), (expected<<16)|actual );
|
|
1451 |
#endif
|
|
1452 |
}
|
|
1453 |
#endif
|
|
1454 |
return KErrNone;
|
|
1455 |
}
|
|
1456 |
|
|
#ifdef _DEBUG
void RHeap::DoMarkStart()
	{
	if (iNestingLevel==0)
		iAllocCount=0;
	iNestingLevel++;
	}

TUint32 RHeap::DoMarkEnd(TInt aExpected)
	{
	if (iNestingLevel==0)
		return 0;
	SHeapCellInfo info;
	SHeapCellInfo* p = iTestData ? (SHeapCellInfo*)iTestData : &info;
	memclr(p, sizeof(info));
	p->iHeap = this;
	Walk(&WalkCheckCell, p);
	if (p->iLevelAlloc != aExpected && !iTestData)
		return (TUint32)(p->iStranded + 1);
	if (--iNestingLevel == 0)
		iAllocCount = 0;
	return 0;
	}

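// Illustrative sketch, not part of the original source: DoMarkStart() and
// DoMarkEnd() sit behind the user-side heap-mark macros (assumption: the
// standard __UHEAP_MARK/__UHEAP_MARKEND pairing). A typical leak check:
#if 0
__UHEAP_MARK;					// DoMarkStart(): bumps iNestingLevel
TAny* p = User::Alloc(32);
User::Free(p);					// all cells made since the mark are freed
__UHEAP_MARKEND;				// DoMarkEnd(0): flags any cell still
								// allocated at this nesting level
#endif
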
void ResetAllocCellLevels(TAny* aPtr, RHeap::TCellType aType, TAny* aCell, TInt aLen)
	{
	(void)aPtr;
	(void)aLen;
	RHeap::SDebugCell* cell = (RHeap::SDebugCell*)aCell;
	if (aType == RHeap::EGoodAllocatedCell)
		{
		cell->nestingLevel = 0;
		}
	}

void RHeap::DoSetAllocFail(TAllocFail aType, TInt aRate)
	{// Default to a burst length of 1, as aType may be a burst type.
	DoSetAllocFail(aType, aRate, 1);
	}

// Don't change this: the ETHeapBadDebugFailParameter check below and the API
// documentation rely on it being 16 for RHeap.
LOCAL_D const TInt KBurstFailRateShift = 16;
LOCAL_D const TInt KBurstFailRateMask = (1 << KBurstFailRateShift) - 1;

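// Illustrative note, not part of the original source: with the shift at 16,
// KBurstFailRateMask == 0xFFFF, so the rate occupies the low 16 bits of
// iFailRate and the burst length the high 16 bits.
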
void RHeap::DoSetAllocFail(TAllocFail aType, TInt aRate, TUint aBurst)
	{
	if (aType==EReset)
		{
		// Reset the levels of all allocated cells to 0.
		// This should prevent subsequent tests failing unnecessarily.
		iFailed = EFalse;		// Reset for ECheckFailure relies on this.
		Walk(&ResetAllocCellLevels, NULL);
		// Reset the heap allocation mark as well.
		iNestingLevel=0;
		iAllocCount=0;
		aType=ENone;
		}

	switch (aType)
		{
	case EBurstRandom:
	case EBurstTrueRandom:
	case EBurstDeterministic:
	case EBurstFailNext:
		// If the fail type is a burst type then iFailRate is split in two:
		// the 16 LSBs are the fail rate and the 16 MSBs are the burst length.
		if (TUint(aRate) > (TUint)KMaxTUint16 || aBurst > KMaxTUint16)
			HEAP_PANIC(ETHeapBadDebugFailParameter);

		iFailed = EFalse;
		iFailType = aType;
		iFailRate = (aRate == 0) ? 1 : aRate;
		iFailAllocCount = -iFailRate;
		iFailRate = iFailRate | (aBurst << KBurstFailRateShift);
		break;

	default:
		iFailed = EFalse;
		iFailType = aType;
		iFailRate = (aRate == 0) ? 1 : aRate;	// A rate of <1 is meaningless
		iFailAllocCount = 0;
		break;
		}

	// Set up iRand as either:
	// - a random seed value, or
	// - a count of the number of failures so far.
	iRand = 0;
#ifndef __KERNEL_MODE__
	switch (iFailType)
		{
	case ETrueRandom:
	case EBurstTrueRandom:
		{
		TTime time;
		time.HomeTime();
		TInt64 seed = time.Int64();
		iRand = Math::Rand(seed);
		break;
		}
	case ERandom:
	case EBurstRandom:
		{
		TInt64 seed = 12345;
		iRand = Math::Rand(seed);
		break;
		}
	default:
		break;
		}
#endif
	}

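// Illustrative note, not part of the original source: for the burst types,
// rate and burst length share iFailRate. For example (hypothetical values),
// DoSetAllocFail(EBurstDeterministic, 5, 2) leaves
// iFailRate == 5 | (2 << KBurstFailRateShift) == 0x00020005 and
// iFailAllocCount == -5, so allocations fail in bursts of two at intervals
// governed by the rate of five.
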
TBool RHeap::CheckForSimulatedAllocFail()
//
// Check whether the user has requested simulated allocation failure and, if
// so, decide whether this allocation should fail; return ETrue for failure.
//
	{
	// For the burst modes iFailRate holds both the failure rate and the
	// burst length.
	TUint16 rate = (TUint16)(iFailRate & KBurstFailRateMask);
	TUint16 burst = (TUint16)(iFailRate >> KBurstFailRateShift);
	TBool r = EFalse;
	switch (iFailType)
		{
#ifndef __KERNEL_MODE__
	case ERandom:
	case ETrueRandom:
		if (++iFailAllocCount>=iFailRate)
			{
			iFailAllocCount=0;
			if (!iFailed)		// haven't failed yet after iFailRate allocations so fail now
				return(ETrue);
			iFailed=EFalse;
			}
		else
			{
			if (!iFailed)
				{
				TInt64 seed=iRand;
				iRand=Math::Rand(seed);
				if (iRand%iFailRate==0)
					{
					iFailed=ETrue;
					return(ETrue);
					}
				}
			}
		break;

	case EBurstRandom:
	case EBurstTrueRandom:
		if (++iFailAllocCount < 0)
			{
			// We haven't started failing yet, so should we start now?
			TInt64 seed = iRand;
			iRand = Math::Rand(seed);
			if (iRand % rate == 0)
				{// Fail now. Reset iFailAllocCount so we keep failing for burst attempts.
				iFailAllocCount = 0;
				r = ETrue;
				}
			}
		else
			{
			if (iFailAllocCount < burst)
				{// Keep failing until we have failed burst times.
				r = ETrue;
				}
			else
				{// We've now failed burst times so start again.
				iFailAllocCount = -(rate - 1);
				}
			}
		break;
#endif
	case EDeterministic:
		if (++iFailAllocCount%iFailRate==0)
			{
			r=ETrue;
			iRand++;	// Keep count of how many times we have failed
			}
		break;

	case EBurstDeterministic:
		// Fail burst times in a row, at intervals governed by rate.
		if (++iFailAllocCount >= 0)
			{
			if (iFailAllocCount == burst - 1)
				{// This is the last failure of the burst; reset the counts
				// so that the next burst starts after rate more attempts.
				iFailAllocCount = -rate;
				}
			r = ETrue;
			iRand++;	// Keep count of how many times we have failed
			}
		break;

	case EFailNext:
		if ((++iFailAllocCount%iFailRate)==0)
			{
			iFailType=ENone;
			r=ETrue;
			iRand++;	// Keep count of how many times we have failed
			}
		break;

	case EBurstFailNext:
		if (++iFailAllocCount >= 0)
			{
			if (iFailAllocCount == burst - 1)
				{// This is the last failure of the burst, so stop failing.
				iFailType = ENone;
				}
			r = ETrue;
			iRand++;	// Keep count of how many times we have failed
			}
		break;
	default:
		break;
		}
	return r;
	}
#endif // ifdef _DEBUG

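// Illustrative sketch, not part of the original source: EDeterministic above
// fails every iFailRate-th allocation. Assuming the standard
// __UHEAP_SETFAIL/__UHEAP_RESET macros reach this code via
// User::__DbgSetAllocFail, a typical out-of-memory test looks like:
#if 0
__UHEAP_SETFAIL(RHeap::EDeterministic, 3);	// every 3rd allocation fails
TAny* a = User::Alloc(16);		// attempt 1 - succeeds
TAny* b = User::Alloc(16);		// attempt 2 - succeeds
TAny* c = User::Alloc(16);		// attempt 3 - returns NULL
__UHEAP_RESET;					// EReset: clear fail mode and nesting levels
#endif
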
UEXPORT_C TInt RHeap::Extension_(TUint aExtensionId, TAny*& a0, TAny* a1)
	{
	return RAllocator::Extension_(aExtensionId, a0, a1);
	}

#if defined(__HEAP_MACHINE_CODED__) && !defined(_DEBUG)
GLDEF_C void RHeap_PanicBadAllocatedCellSize()
	{
	HEAP_PANIC(ETHeapBadAllocatedCellSize);
	}

GLDEF_C void RHeap_PanicBadNextCell()
	{
	HEAP_PANIC(ETHeapFreeBadNextCell);
	}

GLDEF_C void RHeap_PanicBadPrevCell()
	{
	HEAP_PANIC(ETHeapFreeBadPrevCell);
	}

GLDEF_C void RHeap_PanicBadCellAddress()
	{
	HEAP_PANIC(ETHeapBadCellAddress);
	}
#endif