// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// kernel\eka\common\debugfunction.cpp
//
//

#include "common.h"
#ifdef __KERNEL_MODE__
#include <kernel/kern_priv.h>
#endif
#include "dla.h"
#ifndef __KERNEL_MODE__
#include "slab.h"
#include "page_alloc.h"
#endif
#include "heap_hybrid.h"

#define GM (&iGlobalMallocState)
#define __HEAP_CORRUPTED_TRACE(t,p,l) BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)t, (TUint32)p, (TUint32)l);
#define __HEAP_CORRUPTED_TEST(c,x, p,l) if (!c) { if (iFlags & (EMonitorMemory+ETraceAllocs) ) __HEAP_CORRUPTED_TRACE(this,p,l) HEAP_PANIC(x); }
#define __HEAP_CORRUPTED_TEST_STATIC(c,t,x,p,l) if (!c) { if (t && (t->iFlags & (EMonitorMemory+ETraceAllocs) )) __HEAP_CORRUPTED_TRACE(t,p,l) HEAP_PANIC(x); }
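
// A minimal expansion sketch of the corruption macros above ('size', 'bfr'
// and 'slab' are illustrative names, not from this file). Note that only the
// BTrace record is gated on the trace/monitor flags; the panic always fires
// when the condition fails:
//
//   __HEAP_CORRUPTED_TEST((size > 3), ETHeapBadCellAddress, bfr, slab);
//
// expands roughly to
//
//   if (!(size > 3))
//       {
//       if (iFlags & (EMonitorMemory+ETraceAllocs))
//           BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption,
//                           (TUint32)this, (TUint32)bfr, (TUint32)slab);
//       HEAP_PANIC(ETHeapBadCellAddress);
//       }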

TInt RHybridHeap::DebugFunction(TInt aFunc, TAny* a1, TAny* a2)
    {
    TInt r = KErrNone;
    switch(aFunc)
        {

        case RAllocator::ECount:
            struct HeapInfo info;
            Lock();
            GetInfo(&info, NULL);
            *(unsigned*)a1 = info.iFreeN;
            r = info.iAllocN;
            Unlock();
            break;

        case RAllocator::EMarkStart:
            __DEBUG_ONLY(DoMarkStart());
            break;

        case RAllocator::EMarkEnd:
            __DEBUG_ONLY( r = DoMarkEnd((TInt)a1) );
            break;

        case RAllocator::ECheck:
            r = DoCheckHeap((SCheckInfo*)a1);
            break;

        case RAllocator::ESetFail:
            __DEBUG_ONLY(DoSetAllocFail((TAllocFail)(TInt)a1, (TInt)a2));
            break;

        case RAllocator::EGetFail:
            __DEBUG_ONLY(r = iFailType);
            break;

        case RAllocator::ESetBurstFail:
#if _DEBUG
            {
            SRAllocatorBurstFail* fail = (SRAllocatorBurstFail*) a2;
            DoSetAllocFail((TAllocFail)(TInt)a1, fail->iRate, fail->iBurst);
            }
#endif
            break;

        case RAllocator::ECheckFailure:
            // iRand will be incremented for each EFailNext, EBurstFailNext,
            // EDeterministic and EBurstDeterministic failure.
            r = iRand;
            break;

        case RAllocator::ECopyDebugInfo:
            {
            TInt nestingLevel = ((SDebugCell*)a1)[-1].nestingLevel;
            ((SDebugCell*)a2)[-1].nestingLevel = nestingLevel;
            break;
            }

        case RAllocator::EGetSize:
            {
            r = iChunkSize - sizeof(RHybridHeap);
            break;
            }

        case RAllocator::EGetMaxLength:
            {
            r = iMaxLength;
            break;
            }

        case RAllocator::EGetBase:
            {
            *(TAny**)a1 = iBase;
            break;
            }

        case RAllocator::EAlignInteger:
            {
            r = _ALIGN_UP((TInt)a1, iAlign);
            break;
            }

        case RAllocator::EAlignAddr:
            {
            *(TAny**)a2 = (TAny*)_ALIGN_UP((TLinAddr)a1, iAlign);
            break;
            }

        case RHybridHeap::EWalk:
            struct HeapInfo hinfo;
            SWalkInfo winfo;
            Lock();
            winfo.iFunction = (TWalkFunc)a1;
            winfo.iParam = a2;
            winfo.iHeap = (RHybridHeap*)this;
            GetInfo(&hinfo, &winfo);
            Unlock();
            break;

#ifndef __KERNEL_MODE__

        case RHybridHeap::EHybridHeap:
            {
            if ( !a1 )
                return KErrGeneral;
            STestCommand* cmd = (STestCommand*)a1;
            switch ( cmd->iCommand )
                {
                case EGetConfig:
                    cmd->iConfig.iSlabBits = iSlabConfigBits;
                    cmd->iConfig.iDelayedSlabThreshold = iSlabInitThreshold;
                    cmd->iConfig.iPagePower = iPageThreshold;
                    break;

                case ESetConfig:
                    //
                    // New configuration data for slab and page allocator.
                    // Reset heap to get data into use
                    //
#if USE_HYBRID_HEAP
                    iSlabConfigBits = cmd->iConfig.iSlabBits & 0x3fff;
                    iSlabInitThreshold = cmd->iConfig.iDelayedSlabThreshold;
                    iPageThreshold = (cmd->iConfig.iPagePower & 0x1f);
                    Reset();
#endif
                    break;

                case EHeapMetaData:
                    cmd->iData = this;
                    break;

                case ETestData:
                    iTestData = cmd->iData;
                    break;

                default:
                    return KErrNotSupported;

                }

            break;
            }
#endif // __KERNEL_MODE

        default:
            return KErrNotSupported;

        }
    return r;
    }
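
// A minimal usage sketch of how this dispatcher is typically reached from the
// user side (hedged: 'free' and 'count' are illustrative variable names, and
// the wrapper macros are assumed to resolve to these function codes):
//
//   TInt free;
//   TInt count = User::Allocator().DebugFunction(RAllocator::ECount, &free);
//   // count == number of allocated cells, free == number of free cells
//
// Convenience wrappers such as the __UHEAP_MARK / __UHEAP_MARKEND macros and
// RAllocator::Check() funnel into the same set of function codes.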

void RHybridHeap::Walk(SWalkInfo* aInfo, TAny* aBfr, TInt aLth, TCellType aBfrType, TAllocatorType aAllocatorType)
    {
    //
    // This function is always called from RHybridHeap::GetInfo.
    // Actual walk function is called if SWalkInfo pointer is defined
    //
    //
    if ( aInfo )
        {
#ifdef __KERNEL_MODE__
        (void)aAllocatorType;
#if defined(_DEBUG)
        if ( aBfrType == EGoodAllocatedCell )
            aInfo->iFunction(aInfo->iParam, aBfrType, ((TUint8*)aBfr+EDebugHdrSize), (aLth-EDebugHdrSize) );
        else
            aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
#else
        aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
#endif

#else // __KERNEL_MODE__

        if ( aAllocatorType & (EFullSlab + EPartialFullSlab + EEmptySlab + ESlabSpare) )
            {
            if ( aInfo->iHeap )
                {
                TUint32 dummy;
                TInt npages;
                aInfo->iHeap->DoCheckSlab((slab*)aBfr, aAllocatorType);
                __HEAP_CORRUPTED_TEST_STATIC(aInfo->iHeap->CheckBitmap(Floor(aBfr, PAGESIZE), PAGESIZE, dummy, npages),
                                             aInfo->iHeap, ETHeapBadCellAddress, aBfr, aLth);
                }
            if ( aAllocatorType & EPartialFullSlab )
                WalkPartialFullSlab(aInfo, (slab*)aBfr, aBfrType, aLth);
            else if ( aAllocatorType & EFullSlab )
                WalkFullSlab(aInfo, (slab*)aBfr, aBfrType, aLth);
            }
#if defined(_DEBUG)
        else if ( aBfrType == EGoodAllocatedCell )
            aInfo->iFunction(aInfo->iParam, aBfrType, ((TUint8*)aBfr+EDebugHdrSize), (aLth-EDebugHdrSize) );
        else
            aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
#else
        else
            aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
#endif

#endif // __KERNEL_MODE
        }
    }
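
// A minimal sketch of a walker callback that could be driven through
// RHybridHeap::EWalk (hedged: 'MyWalkFunc' and 'total' are hypothetical
// names). The callback must match the TWalkFunc signature; note that in
// _DEBUG builds the debug header has already been stripped here, so aCell
// points at user data and aLen excludes EDebugHdrSize:
//
//   void MyWalkFunc(TAny* aParam, RHybridHeap::TCellType aType,
//                   TAny* aCell, TInt aLen)
//       {
//       TInt* total = (TInt*)aParam;
//       if (aType == RHybridHeap::EGoodAllocatedCell)
//           *total += aLen;   // accumulate payload bytes of live cells
//       }
//
//   TInt total = 0;
//   heap->DebugFunction(RHybridHeap::EWalk, (TAny*)&MyWalkFunc, &total);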

#ifndef __KERNEL_MODE__
void RHybridHeap::WalkPartialFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType /*aBfrType*/, TInt /*aLth*/)
    {
    if ( aInfo )
        {
        //
        // Build bitmap of free buffers in the partial full slab
        //
        TUint32 bitmap[4];
        __HEAP_CORRUPTED_TEST_STATIC( (aInfo->iHeap != NULL), aInfo->iHeap, ETHeapBadCellAddress, 0, aSlab);
        aInfo->iHeap->BuildPartialSlabBitmap(bitmap, aSlab);
        //
        // Find used (allocated) buffers from iPartial full slab
        //
        TUint32 h = aSlab->iHeader;
        TUint32 size = SlabHeaderSize(h);
        TUint32 count = KMaxSlabPayload / size; // Total buffer count in slab
        TUint32 i = 0;
        TUint32 ix = 0;
        TUint32 bit = 1;

        while ( i < count )
            {

            if ( bitmap[ix] & bit )
                {
                aInfo->iFunction(aInfo->iParam, EGoodFreeCell, &aSlab->iPayload[i*size], size );
                }
            else
                {
#if defined(_DEBUG)
                aInfo->iFunction(aInfo->iParam, EGoodAllocatedCell, (&aSlab->iPayload[i*size]+EDebugHdrSize), (size-EDebugHdrSize) );
#else
                aInfo->iFunction(aInfo->iParam, EGoodAllocatedCell, &aSlab->iPayload[i*size], size );
#endif
                }
            bit <<= 1;
            if ( bit == 0 )
                {
                bit = 1;
                ix ++;
                }

            i ++;
            }
        }

    }
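
// Worked example of the bitmap-to-payload mapping used above (illustrative
// numbers, not taken from the source): with a cell size of 24 bytes, buffer
// i occupies &iPayload[i*24], and bit (i % 32) of word bitmap[i / 32] is set
// when that buffer is on the free chain; the walk then reports it as
// EGoodFreeCell, and otherwise as EGoodAllocatedCell (with the debug header
// stripped in _DEBUG builds).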

void RHybridHeap::WalkFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType aBfrType, TInt /*aLth*/)
    {
    if ( aInfo )
        {
        TUint32 h = aSlab->iHeader;
        TUint32 size = SlabHeaderSize(h);
        TUint32 count = (SlabHeaderUsedm4(h) + 4) / size;
        TUint32 i = 0;
        while ( i < count )
            {
#if defined(_DEBUG)
            if ( aBfrType == EGoodAllocatedCell )
                aInfo->iFunction(aInfo->iParam, aBfrType, (&aSlab->iPayload[i*size]+EDebugHdrSize), (size-EDebugHdrSize) );
            else
                aInfo->iFunction(aInfo->iParam, aBfrType, &aSlab->iPayload[i*size], size );
#else
            aInfo->iFunction(aInfo->iParam, aBfrType, &aSlab->iPayload[i*size], size );
#endif
            i ++;
            }
        }
    }

void RHybridHeap::BuildPartialSlabBitmap(TUint32* aBitmap, slab* aSlab, TAny* aBfr)
    {
    //
    // Build a bitmap of free buffers in a partial full slab
    //
    TInt i;
    TUint32 bit = 0;
    TUint32 index;
    TUint32 h = aSlab->iHeader;
    TUint32 used = SlabHeaderUsedm4(h)+4;
    TUint32 size = SlabHeaderSize(h);
    TInt count = (KMaxSlabPayload / size);
    TInt free_count = count - (used / size); // Total free buffer count in slab
    aBitmap[0] = 0, aBitmap[1] = 0, aBitmap[2] = 0, aBitmap[3] = 0;
    TUint32 offs = (h & 0xff) << 2;

    //
    // Process first buffer in partial slab free buffer chain
    //
    while ( offs )
        {
        unsigned char* p = (unsigned char*)Offset(aSlab, offs);
        __HEAP_CORRUPTED_TEST( (sizeof(slabhdr) <= offs), ETHeapBadCellAddress, p, aSlab);
        offs -= sizeof(slabhdr);
        __HEAP_CORRUPTED_TEST( (offs % size == 0), ETHeapBadCellAddress, p, aSlab);
        index = (offs / size); // Bit index in bitmap
        i = 0;
        while ( i < 4 )
            {
            if ( index < 32 )
                {
                bit = (1 << index);
                break;
                }
            index -= 32;
            i ++;
            }

        __HEAP_CORRUPTED_TEST( ((aBitmap[i] & bit) == 0), ETHeapBadCellAddress, p, aSlab); // Buffer already in chain

        aBitmap[i] |= bit;
        free_count --;
        offs = ((unsigned)*p) << 2; // Next in free chain
        }

    __HEAP_CORRUPTED_TEST( (free_count >= 0), ETHeapBadCellAddress, aBfr, aSlab); // free buffer count/size mismatch
    //
    // Process the rest of the free buffers, which are in the
    // wilderness (at the end of the slab)
    //
    index = count - 1;
    i = index / 32;
    index = index % 32;
    while ( free_count && (i >= 0))
        {
        bit = (1 << index);
        __HEAP_CORRUPTED_TEST( ((aBitmap[i] & bit) == 0), ETHeapBadCellAddress, aBfr, aSlab); // Buffer already in chain
        aBitmap[i] |= bit;
        if ( index )
            index --;
        else
            {
            index = 31;
            i --;
            }
        free_count --;
        }

    if ( aBfr ) // Assure that specified buffer does NOT exist in partial slab free buffer chain
        {
        offs = LowBits(aBfr, SLABSIZE);
        __HEAP_CORRUPTED_TEST( (sizeof(slabhdr) <= offs), ETHeapBadCellAddress, aBfr, aSlab);
        offs -= sizeof(slabhdr);
        __HEAP_CORRUPTED_TEST( ((offs % size) == 0), ETHeapBadCellAddress, aBfr, aSlab);
        index = (offs / size); // Bit index in bitmap
        i = 0;
        while ( i < 4 )
            {
            if ( index < 32 )
                {
                bit = (1 << index);
                break;
                }
            index -= 32;
            i ++;
            }
        __HEAP_CORRUPTED_TEST( ((aBitmap[i] & bit) == 0), ETHeapBadCellAddress, aBfr, aSlab); // Buffer already in chain
        }
    }
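
// Worked example of the free-chain encoding consumed above (illustrative
// numbers): the low byte of the slab header holds the offset of the first
// free cell in 4-byte units, so offs = (h & 0xff) << 2 recovers a byte
// offset from the slab base; e.g. h & 0xff == 10 gives offs == 40. After
// subtracting sizeof(slabhdr) the remainder must land exactly on a cell
// boundary (offs % size == 0), otherwise the chain is corrupt and the check
// panics with ETHeapBadCellAddress. Each free cell then stores the shifted
// offset of the next free cell in its first byte.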

#endif // __KERNEL_MODE__

void RHybridHeap::WalkCheckCell(TAny* aPtr, TCellType aType, TAny* aCell, TInt aLen)
    {
    (void)aCell;
    SHeapCellInfo& info = *(SHeapCellInfo*)aPtr;
    switch(aType)
        {
        case EGoodAllocatedCell:
            {
            ++info.iTotalAlloc;
            info.iTotalAllocSize += aLen;
#if defined(_DEBUG)
            RHybridHeap& h = *info.iHeap;
            SDebugCell* DbgCell = (SDebugCell*)((TUint8*)aCell-EDebugHdrSize);
            if ( DbgCell->nestingLevel == h.iNestingLevel )
                {
                if (++info.iLevelAlloc==1)
                    info.iStranded = DbgCell;
#ifdef __KERNEL_MODE__
                if (KDebugNum(KSERVER) || KDebugNum(KTESTFAST))
                    {
                    Kern::Printf("LEAKED KERNEL HEAP CELL @ %08x : len=%d", aCell, aLen);
                    TLinAddr base = ((TLinAddr)aCell)&~0x0f;
                    TLinAddr end = ((TLinAddr)aCell)+(TLinAddr)aLen;
                    while(base<end)
                        {
                        const TUint32* p = (const TUint32*)base;
                        Kern::Printf("%08x: %08x %08x %08x %08x", p, p[0], p[1], p[2], p[3]);
                        base += 16;
                        }
                    }
#endif
                }
#endif
            break;
            }
        case EGoodFreeCell:
            ++info.iTotalFree;
            break;
        case EBadAllocatedCellSize:
            HEAP_PANIC(ETHeapBadAllocatedCellSize);
        case EBadAllocatedCellAddress:
            HEAP_PANIC(ETHeapBadAllocatedCellAddress);
        case EBadFreeCellAddress:
            HEAP_PANIC(ETHeapBadFreeCellAddress);
        case EBadFreeCellSize:
            HEAP_PANIC(ETHeapBadFreeCellSize);
        default:
            HEAP_PANIC(ETHeapWalkBadCellType);
        }
    }


TInt RHybridHeap::DoCheckHeap(SCheckInfo* aInfo)
    {
    (void)aInfo;
    SHeapCellInfo info;
    memclr(&info, sizeof(info));
    info.iHeap = this;
    struct HeapInfo hinfo;
    SWalkInfo winfo;
    Lock();
    DoCheckMallocState(GM); // Check DL heap internal structure
#ifndef __KERNEL_MODE__
    TUint32 dummy;
    TInt npages;
    __HEAP_CORRUPTED_TEST(CheckBitmap(NULL, 0, dummy, npages), ETHeapBadCellAddress, this, 0); // Check page allocator buffers
    DoCheckSlabTrees();
    DoCheckCommittedSize(npages, GM);
#endif
    winfo.iFunction = WalkCheckCell;
    winfo.iParam = &info;
    winfo.iHeap = (RHybridHeap*)this;
    GetInfo(&hinfo, &winfo);
    Unlock();

#if defined(_DEBUG)
    if (!aInfo)
        return KErrNone;
    TInt expected = aInfo->iCount;
    TInt actual = aInfo->iAll ? info.iTotalAlloc : info.iLevelAlloc;
    if (actual!=expected && !iTestData)
        {
#ifdef __KERNEL_MODE__
        Kern::Fault("KERN-ALLOC COUNT", (expected<<16)|actual );
#else
        User::Panic(_L("ALLOC COUNT"), (expected<<16)|actual );
#endif
        }
#endif
    return KErrNone;
    }
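
// A minimal usage sketch (hedged: assumes the usual user-side wrappers reach
// here through RAllocator::ECheck): the __UHEAP_CHECK/__UHEAP_CHECKALL macros
// supply an SCheckInfo, so for instance
//
//   __UHEAP_CHECKALL(3);   // panics with "ALLOC COUNT" unless exactly
//                          // 3 cells are currently allocated
//
// while a NULL SCheckInfo (as typically passed by RAllocator::Check()) only
// runs the structural checks above and returns KErrNone.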

#ifdef _DEBUG
void RHybridHeap::DoMarkStart()
    {
    if (iNestingLevel==0)
        iAllocCount=0;
    iNestingLevel++;
    }

TUint32 RHybridHeap::DoMarkEnd(TInt aExpected)
    {
    if (iNestingLevel==0)
        return 0;
    SHeapCellInfo info;
    SHeapCellInfo* p = iTestData ? (SHeapCellInfo*)iTestData : &info;
    memclr(p, sizeof(info));
    p->iHeap = this;
    struct HeapInfo hinfo;
    SWalkInfo winfo;
    Lock();
    winfo.iFunction = WalkCheckCell;
    winfo.iParam = p;
    winfo.iHeap = (RHybridHeap*)this;
    GetInfo(&hinfo, &winfo);
    Unlock();

    if (p->iLevelAlloc != aExpected && !iTestData)
        return (TUint32)(p->iStranded + 1);
    if (--iNestingLevel == 0)
        iAllocCount = 0;
    return 0;
    }
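
// A minimal usage sketch of the mark machinery (hedged: assumes __UHEAP_MARK
// maps to EMarkStart/DoMarkStart and __UHEAP_MARKEND to EMarkEnd/DoMarkEnd
// with an expected count of 0):
//
//   __UHEAP_MARK;
//   TAny* p = User::Alloc(32);
//   __UHEAP_MARKEND;   // trips the leak check: one cell is still
//                      // allocated at this nesting level
//
// unless User::Free(p) is called first. On failure DoMarkEnd returns
// iStranded + 1 (pointer arithmetic in SDebugCell units, i.e. the address
// just past the stranded cell's debug header) rather than 0.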

void RHybridHeap::DoSetAllocFail(TAllocFail aType, TInt aRate)
    {// Default to a burst mode of 1, as aType may be a burst type.
    DoSetAllocFail(aType, aRate, 1);
    }

void ResetAllocCellLevels(TAny* aPtr, RHybridHeap::TCellType aType, TAny* aCell, TInt aLen)
    {
    (void)aPtr;
    (void)aLen;

    if (aType == RHybridHeap::EGoodAllocatedCell)
        {
        RHybridHeap::SDebugCell* DbgCell = (RHybridHeap::SDebugCell*)((TUint8*)aCell-RHeap::EDebugHdrSize);
        DbgCell->nestingLevel = 0;
        }
    }

// Don't change as the ETHeapBadDebugFailParameter check below and the API
// documentation rely on this being 16 for RHybridHeap.
LOCAL_D const TInt KBurstFailRateShift = 16;
LOCAL_D const TInt KBurstFailRateMask = (1 << KBurstFailRateShift) - 1;

void RHybridHeap::DoSetAllocFail(TAllocFail aType, TInt aRate, TUint aBurst)
    {
    if (aType==EReset)
        {
        // reset levels of all allocated cells to 0
        // this should prevent subsequent tests failing unnecessarily
        iFailed = EFalse; // Reset for ECheckFailure relies on this.
        struct HeapInfo hinfo;
        SWalkInfo winfo;
        Lock();
        winfo.iFunction = (TWalkFunc)&ResetAllocCellLevels;
        winfo.iParam = NULL;
        winfo.iHeap = (RHybridHeap*)this;
        GetInfo(&hinfo, &winfo);
        Unlock();
        // reset heap allocation mark as well
        iNestingLevel=0;
        iAllocCount=0;
        aType=ENone;
        }

    switch (aType)
        {
        case EBurstRandom:
        case EBurstTrueRandom:
        case EBurstDeterministic:
        case EBurstFailNext:
            // If the fail type is a burst type then iFailRate is split in 2:
            // the 16 lsbs are the fail rate and the 16 msbs are the burst length.
            if (TUint(aRate) > (TUint)KMaxTUint16 || aBurst > KMaxTUint16)
                HEAP_PANIC(ETHeapBadDebugFailParameter);

            iFailed = EFalse;
            iFailType = aType;
            iFailRate = (aRate == 0) ? 1 : aRate;
            iFailAllocCount = -iFailRate;
            iFailRate = iFailRate | (aBurst << KBurstFailRateShift);
            break;

        default:
            iFailed = EFalse;
            iFailType = aType;
            iFailRate = (aRate == 0) ? 1 : aRate; // A rate of <1 is meaningless
            iFailAllocCount = 0;
            break;
        }

    // Set up iRand for either:
    // - random seed value, or
    // - a count of the number of failures so far.
    iRand = 0;
#ifndef __KERNEL_MODE__
    switch (iFailType)
        {
        case ETrueRandom:
        case EBurstTrueRandom:
            {
            TTime time;
            time.HomeTime();
            TInt64 seed = time.Int64();
            iRand = Math::Rand(seed);
            break;
            }
        case ERandom:
        case EBurstRandom:
            {
            TInt64 seed = 12345;
            iRand = Math::Rand(seed);
            break;
            }
        default:
            break;
        }
#endif
    }
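
// A minimal usage sketch (hedged: assumes __UHEAP_SETFAIL and
// __UHEAP_SETBURSTFAIL reach this function via ESetFail/ESetBurstFail).
// For the burst types the two 16-bit fields share iFailRate as described
// above; for example, rate 5 with burst 2 is stored as
//
//   iFailRate = 5 | (2 << 16);   // == 0x00020005
//
// which is why both values are range-checked against KMaxTUint16 before
// packing.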

TBool RHybridHeap::CheckForSimulatedAllocFail()
//
// Check to see if the user has requested simulated alloc failure, and if so
// possibly return ETrue indicating a failure.
//
    {
    // For burst mode failures iFailRate is shared
    TUint16 rate = (TUint16)(iFailRate & KBurstFailRateMask);
    TUint16 burst = (TUint16)(iFailRate >> KBurstFailRateShift);
    TBool r = EFalse;
    switch (iFailType)
        {
#ifndef __KERNEL_MODE__
        case ERandom:
        case ETrueRandom:
            if (++iFailAllocCount>=iFailRate)
                {
                iFailAllocCount=0;
                if (!iFailed) // haven't failed yet after iFailRate allocations so fail now
                    return(ETrue);
                iFailed=EFalse;
                }
            else
                {
                if (!iFailed)
                    {
                    TInt64 seed=iRand;
                    iRand=Math::Rand(seed);
                    if (iRand%iFailRate==0)
                        {
                        iFailed=ETrue;
                        return(ETrue);
                        }
                    }
                }
            break;

        case EBurstRandom:
        case EBurstTrueRandom:
            if (++iFailAllocCount < 0)
                {
                // We haven't started failing yet so should we now?
                TInt64 seed = iRand;
                iRand = Math::Rand(seed);
                if (iRand % rate == 0)
                    {// Fail now. Reset iFailAllocCount so we fail burst times
                    iFailAllocCount = 0;
                    r = ETrue;
                    }
                }
            else
                {
                if (iFailAllocCount < burst)
                    {// Keep failing for burst times
                    r = ETrue;
                    }
                else
                    {// We've now failed burst times so start again.
                    iFailAllocCount = -(rate - 1);
                    }
                }
            break;
#endif
        case EDeterministic:
            if (++iFailAllocCount%iFailRate==0)
                {
                r=ETrue;
                iRand++; // Keep count of how many times we have failed
                }
            break;

        case EBurstDeterministic:
            // This will fail burst number of times, every rate attempts.
            if (++iFailAllocCount >= 0)
                {
                if (iFailAllocCount == burst - 1)
                    {// This is the burst time we have failed so make it the last by
                    // resetting counts so we next fail after rate attempts.
                    iFailAllocCount = -rate;
                    }
                r = ETrue;
                iRand++; // Keep count of how many times we have failed
                }
            break;

        case EFailNext:
            if ((++iFailAllocCount%iFailRate)==0)
                {
                iFailType=ENone;
                r=ETrue;
                iRand++; // Keep count of how many times we have failed
                }
            break;

        case EBurstFailNext:
            if (++iFailAllocCount >= 0)
                {
                if (iFailAllocCount == burst - 1)
                    {// This is the burst time we have failed so make it the last.
                    iFailType = ENone;
                    }
                r = ETrue;
                iRand++; // Keep count of how many times we have failed
                }
            break;

        default:
            break;
        }
    return r;
    }
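
// Worked example of the deterministic modes (illustrative numbers): with
// EDeterministic and iFailRate == 3, allocations 3, 6, 9, ... fail, since
// (++iFailAllocCount % iFailRate == 0) fires on every third attempt. With
// EBurstDeterministic, rate == 3 and burst == 2, iFailAllocCount starts at
// -3, so attempts 3 and 4 fail, then 7 and 8, and so on; in both modes
// iRand counts the total number of simulated failures, which is what
// ECheckFailure reports.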

#endif // DEBUG

//
// Methods for Doug Lea allocator detailed check
//

void RHybridHeap::DoCheckAnyChunk(mstate m, mchunkptr p)
    {
    __HEAP_CORRUPTED_TEST(((IS_ALIGNED(CHUNK2MEM(p))) || (p->iHead == FENCEPOST_HEAD)), ETHeapBadCellAddress, p, 0);
    (void)m;
    }

/* Check properties of iTop chunk */
void RHybridHeap::DoCheckTopChunk(mstate m, mchunkptr p)
    {
    msegmentptr sp = &m->iSeg;
    size_t sz = CHUNKSIZE(p);
    __HEAP_CORRUPTED_TEST((sp != 0), ETHeapBadCellAddress, p, 0);
    __HEAP_CORRUPTED_TEST(((IS_ALIGNED(CHUNK2MEM(p))) || (p->iHead == FENCEPOST_HEAD)), ETHeapBadCellAddress, p,0);
    __HEAP_CORRUPTED_TEST((sz == m->iTopSize), ETHeapBadCellAddress,p,0);
    __HEAP_CORRUPTED_TEST((sz > 0), ETHeapBadCellAddress,p,0);
    __HEAP_CORRUPTED_TEST((sz == ((sp->iBase + sp->iSize) - (TUint8*)p) - TOP_FOOT_SIZE), ETHeapBadCellAddress,p,0);
    __HEAP_CORRUPTED_TEST((PINUSE(p)), ETHeapBadCellAddress,p,0);
    __HEAP_CORRUPTED_TEST((!NEXT_PINUSE(p)), ETHeapBadCellAddress,p,0);
    }

/* Check properties of inuse chunks */
void RHybridHeap::DoCheckInuseChunk(mstate m, mchunkptr p)
    {
    DoCheckAnyChunk(m, p);
    __HEAP_CORRUPTED_TEST((CINUSE(p)), ETHeapBadCellAddress,p,0);
    __HEAP_CORRUPTED_TEST((NEXT_PINUSE(p)), ETHeapBadCellAddress,p,0);
    /* If not PINUSE and not mmapped, previous chunk has OK offset */
    __HEAP_CORRUPTED_TEST((PINUSE(p) || NEXT_CHUNK(PREV_CHUNK(p)) == p), ETHeapBadCellAddress,p,0);
    }

/* Check properties of free chunks */
void RHybridHeap::DoCheckFreeChunk(mstate m, mchunkptr p)
    {
    size_t sz = p->iHead & ~(PINUSE_BIT|CINUSE_BIT);
    mchunkptr next = CHUNK_PLUS_OFFSET(p, sz);
    DoCheckAnyChunk(m, p);
    __HEAP_CORRUPTED_TEST((!CINUSE(p)), ETHeapBadCellAddress,p,0);
    __HEAP_CORRUPTED_TEST((!NEXT_PINUSE(p)), ETHeapBadCellAddress,p,0);
    if (p != m->iDv && p != m->iTop)
        {
        if (sz >= MIN_CHUNK_SIZE)
            {
            __HEAP_CORRUPTED_TEST(((sz & CHUNK_ALIGN_MASK) == 0), ETHeapBadCellAddress,p,0);
            __HEAP_CORRUPTED_TEST((IS_ALIGNED(CHUNK2MEM(p))), ETHeapBadCellAddress,p,0);
            __HEAP_CORRUPTED_TEST((next->iPrevFoot == sz), ETHeapBadCellAddress,p,0);
            __HEAP_CORRUPTED_TEST((PINUSE(p)), ETHeapBadCellAddress,p,0);
            __HEAP_CORRUPTED_TEST( (next == m->iTop || CINUSE(next)), ETHeapBadCellAddress,p,0);
            __HEAP_CORRUPTED_TEST((p->iFd->iBk == p), ETHeapBadCellAddress,p,0);
            __HEAP_CORRUPTED_TEST((p->iBk->iFd == p), ETHeapBadCellAddress,p,0);
            }
        else /* markers are always of size SIZE_T_SIZE */
            __HEAP_CORRUPTED_TEST((sz == SIZE_T_SIZE), ETHeapBadCellAddress,p,0);
        }
    }

/* Check properties of malloced chunks at the point they are malloced */
void RHybridHeap::DoCheckMallocedChunk(mstate m, void* mem, size_t s)
    {
    if (mem != 0)
        {
        mchunkptr p = MEM2CHUNK(mem);
        size_t sz = p->iHead & ~(PINUSE_BIT|CINUSE_BIT);
        DoCheckInuseChunk(m, p);
        __HEAP_CORRUPTED_TEST(((sz & CHUNK_ALIGN_MASK) == 0), ETHeapBadCellAddress,p,0);
        __HEAP_CORRUPTED_TEST((sz >= MIN_CHUNK_SIZE), ETHeapBadCellAddress,p,0);
        __HEAP_CORRUPTED_TEST((sz >= s), ETHeapBadCellAddress,p,0);
        /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
        __HEAP_CORRUPTED_TEST((sz < (s + MIN_CHUNK_SIZE)), ETHeapBadCellAddress,p,0);
        }
    }

/* Check a tree and its subtrees. */
void RHybridHeap::DoCheckTree(mstate m, tchunkptr t)
    {
    tchunkptr head = 0;
    tchunkptr u = t;
    bindex_t tindex = t->iIndex;
    size_t tsize = CHUNKSIZE(t);
    bindex_t idx;
    DoComputeTreeIndex(tsize, idx);
    __HEAP_CORRUPTED_TEST((tindex == idx), ETHeapBadCellAddress,u,0);
    __HEAP_CORRUPTED_TEST((tsize >= MIN_LARGE_SIZE), ETHeapBadCellAddress,u,0);
    __HEAP_CORRUPTED_TEST((tsize >= MINSIZE_FOR_TREE_INDEX(idx)), ETHeapBadCellAddress,u,0);
    __HEAP_CORRUPTED_TEST(((idx == NTREEBINS-1) || (tsize < MINSIZE_FOR_TREE_INDEX((idx+1)))), ETHeapBadCellAddress,u,0);

    do
        { /* traverse through chain of same-sized nodes */
        DoCheckAnyChunk(m, ((mchunkptr)u));
        __HEAP_CORRUPTED_TEST((u->iIndex == tindex), ETHeapBadCellAddress,u,0);
        __HEAP_CORRUPTED_TEST((CHUNKSIZE(u) == tsize), ETHeapBadCellAddress,u,0);
        __HEAP_CORRUPTED_TEST((!CINUSE(u)), ETHeapBadCellAddress,u,0);
        __HEAP_CORRUPTED_TEST((!NEXT_PINUSE(u)), ETHeapBadCellAddress,u,0);
        __HEAP_CORRUPTED_TEST((u->iFd->iBk == u), ETHeapBadCellAddress,u,0);
        __HEAP_CORRUPTED_TEST((u->iBk->iFd == u), ETHeapBadCellAddress,u,0);
        if (u->iParent == 0)
            {
            __HEAP_CORRUPTED_TEST((u->iChild[0] == 0), ETHeapBadCellAddress,u,0);
            __HEAP_CORRUPTED_TEST((u->iChild[1] == 0), ETHeapBadCellAddress,u,0);
            }
        else
            {
            __HEAP_CORRUPTED_TEST((head == 0), ETHeapBadCellAddress,u,0); /* only one node on chain has iParent */
            head = u;
            __HEAP_CORRUPTED_TEST((u->iParent != u), ETHeapBadCellAddress,u,0);
            __HEAP_CORRUPTED_TEST( (u->iParent->iChild[0] == u ||
                                    u->iParent->iChild[1] == u ||
                                    *((tbinptr*)(u->iParent)) == u), ETHeapBadCellAddress,u,0);
            if (u->iChild[0] != 0)
                {
                __HEAP_CORRUPTED_TEST((u->iChild[0]->iParent == u), ETHeapBadCellAddress,u,0);
                __HEAP_CORRUPTED_TEST((u->iChild[0] != u), ETHeapBadCellAddress,u,0);
                DoCheckTree(m, u->iChild[0]);
                }
            if (u->iChild[1] != 0)
                {
                __HEAP_CORRUPTED_TEST((u->iChild[1]->iParent == u), ETHeapBadCellAddress,u,0);
                __HEAP_CORRUPTED_TEST((u->iChild[1] != u), ETHeapBadCellAddress,u,0);
                DoCheckTree(m, u->iChild[1]);
                }
            if (u->iChild[0] != 0 && u->iChild[1] != 0)
                {
                __HEAP_CORRUPTED_TEST((CHUNKSIZE(u->iChild[0]) < CHUNKSIZE(u->iChild[1])), ETHeapBadCellAddress,u,0);
                }
            }
        u = u->iFd;
        }
    while (u != t);
    __HEAP_CORRUPTED_TEST((head != 0), ETHeapBadCellAddress,u,0);
    }

/* Check all the chunks in a treebin. */
void RHybridHeap::DoCheckTreebin(mstate m, bindex_t i)
    {
    tbinptr* tb = TREEBIN_AT(m, i);
    tchunkptr t = *tb;
    int empty = (m->iTreeMap & (1U << i)) == 0;
    if (t == 0)
        __HEAP_CORRUPTED_TEST((empty), ETHeapBadCellAddress,t,0);
    if (!empty)
        DoCheckTree(m, t);
    }

/* Check all the chunks in a smallbin. */
void RHybridHeap::DoCheckSmallbin(mstate m, bindex_t i)
    {
    sbinptr b = SMALLBIN_AT(m, i);
    mchunkptr p = b->iBk;
    unsigned int empty = (m->iSmallMap & (1U << i)) == 0;
    if (p == b)
        __HEAP_CORRUPTED_TEST((empty), ETHeapBadCellAddress,p,0);
    if (!empty)
        {
        for (; p != b; p = p->iBk)
            {
            size_t size = CHUNKSIZE(p);
            mchunkptr q;
            /* each chunk claims to be free */
            DoCheckFreeChunk(m, p);
            /* chunk belongs in bin */
            __HEAP_CORRUPTED_TEST((SMALL_INDEX(size) == i), ETHeapBadCellAddress,p,0);
            __HEAP_CORRUPTED_TEST((p->iBk == b || CHUNKSIZE(p->iBk) == CHUNKSIZE(p)), ETHeapBadCellAddress,p,0);
            /* chunk is followed by an inuse chunk */
            q = NEXT_CHUNK(p);
            if (q->iHead != FENCEPOST_HEAD)
                DoCheckInuseChunk(m, q);
            }
        }
    }

/* Find x in a bin. Used in other check functions. */
TInt RHybridHeap::BinFind(mstate m, mchunkptr x)
    {
    size_t size = CHUNKSIZE(x);
    if (IS_SMALL(size))
        {
        bindex_t sidx = SMALL_INDEX(size);
        sbinptr b = SMALLBIN_AT(m, sidx);
        if (SMALLMAP_IS_MARKED(m, sidx))
            {
            mchunkptr p = b;
            do
                {
                if (p == x)
                    return 1;
                }
            while ((p = p->iFd) != b);
            }
        }
    else
        {
        bindex_t tidx;
        DoComputeTreeIndex(size, tidx);
        if (TREEMAP_IS_MARKED(m, tidx))
            {
            tchunkptr t = *TREEBIN_AT(m, tidx);
            size_t sizebits = size << LEFTSHIFT_FOR_TREE_INDEX(tidx);
            while (t != 0 && CHUNKSIZE(t) != size)
                {
                t = t->iChild[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
                sizebits <<= 1;
                }
            if (t != 0)
                {
                tchunkptr u = t;
                do
                    {
                    if (u == (tchunkptr)x)
                        return 1;
                    }
                while ((u = u->iFd) != t);
                }
            }
        }
    return 0;
    }
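
// Sketch of the tree-bin lookup above (hedged: based on the Doug Lea
// allocator's bitwise-trie layout, which this port follows): each treebin is
// a trie keyed on chunk size, and LEFTSHIFT_FOR_TREE_INDEX(tidx) aligns the
// size so that its most significant undecided bit reaches the top of the
// word. The expression
//
//   t = t->iChild[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
//
// therefore descends left or right on successive size bits, mirroring how
// chunks are inserted, so a chunk is found iff it was linked into the
// position expected for its size.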

/* Traverse each chunk and check it; return total */
size_t RHybridHeap::TraverseAndCheck(mstate m)
    {
    size_t sum = 0;
    msegmentptr s = &m->iSeg;
    sum += m->iTopSize + TOP_FOOT_SIZE;
    mchunkptr q = ALIGN_AS_CHUNK(s->iBase);
    mchunkptr lastq = 0;
    __HEAP_CORRUPTED_TEST((PINUSE(q)), ETHeapBadCellAddress,q,0);
    while (q != m->iTop && q->iHead != FENCEPOST_HEAD)
        {
        sum += CHUNKSIZE(q);
        if (CINUSE(q))
            {
            __HEAP_CORRUPTED_TEST((!BinFind(m, q)), ETHeapBadCellAddress,q,0);
            DoCheckInuseChunk(m, q);
            }
        else
            {
            __HEAP_CORRUPTED_TEST((q == m->iDv || BinFind(m, q)), ETHeapBadCellAddress,q,0);
            __HEAP_CORRUPTED_TEST((lastq == 0 || CINUSE(lastq)), ETHeapBadCellAddress,q,0); /* Not 2 consecutive free */
            DoCheckFreeChunk(m, q);
            }
        lastq = q;
        q = NEXT_CHUNK(q);
        }
    return sum;
    }

/* Check all properties of malloc_state. */
void RHybridHeap::DoCheckMallocState(mstate m)
    {
    bindex_t i;
    // size_t total;
    /* check bins */
    for (i = 0; i < NSMALLBINS; ++i)
        DoCheckSmallbin(m, i);
    for (i = 0; i < NTREEBINS; ++i)
        DoCheckTreebin(m, i);

    if (m->iDvSize != 0)
        { /* check iDv chunk */
        DoCheckAnyChunk(m, m->iDv);
        __HEAP_CORRUPTED_TEST((m->iDvSize == CHUNKSIZE(m->iDv)), ETHeapBadCellAddress,m->iDv,0);
        __HEAP_CORRUPTED_TEST((m->iDvSize >= MIN_CHUNK_SIZE), ETHeapBadCellAddress,m->iDv,0);
        __HEAP_CORRUPTED_TEST((BinFind(m, m->iDv) == 0), ETHeapBadCellAddress,m->iDv,0);
        }

    if (m->iTop != 0)
        { /* check iTop chunk */
        DoCheckTopChunk(m, m->iTop);
        __HEAP_CORRUPTED_TEST((m->iTopSize == CHUNKSIZE(m->iTop)), ETHeapBadCellAddress,m->iTop,0);
        __HEAP_CORRUPTED_TEST((m->iTopSize > 0), ETHeapBadCellAddress,m->iTop,0);
        __HEAP_CORRUPTED_TEST((BinFind(m, m->iTop) == 0), ETHeapBadCellAddress,m->iTop,0);
        }

    // total =
    TraverseAndCheck(m);
    }

#ifndef __KERNEL_MODE__
//
// Methods for Slab allocator detailed check
//
void RHybridHeap::DoCheckSlabTree(slab** aS, TBool aPartialPage)
    {
    slab* s = *aS;
    if (!s)
        return;

    TUint size = SlabHeaderSize(s->iHeader);
    slab** parent = aS;
    slab** child2 = &s->iChild2;

    while ( s )
        {
        __HEAP_CORRUPTED_TEST((s->iParent == parent), ETHeapBadCellAddress,s,SLABSIZE);
        __HEAP_CORRUPTED_TEST((!s->iChild1 || s < s->iChild1), ETHeapBadCellAddress,s,SLABSIZE);
        __HEAP_CORRUPTED_TEST((!s->iChild2 || s < s->iChild2), ETHeapBadCellAddress,s,SLABSIZE);

        if ( aPartialPage )
            {
            if ( s->iChild1 )
                size = SlabHeaderSize(s->iChild1->iHeader);
            }
        else
            {
            __HEAP_CORRUPTED_TEST((SlabHeaderSize(s->iHeader) == size), ETHeapBadCellAddress,s,SLABSIZE);
            }
        parent = &s->iChild1;
        s = s->iChild1;
        }

    parent = child2;
    s = *child2;

    while ( s )
        {
        __HEAP_CORRUPTED_TEST((s->iParent == parent), ETHeapBadCellAddress,s,SLABSIZE);
        __HEAP_CORRUPTED_TEST((!s->iChild1 || s < s->iChild1), ETHeapBadCellAddress,s,SLABSIZE);
        __HEAP_CORRUPTED_TEST((!s->iChild2 || s < s->iChild2), ETHeapBadCellAddress,s,SLABSIZE);

        if ( aPartialPage )
            {
            if ( s->iChild2 )
                size = SlabHeaderSize(s->iChild2->iHeader);
            }
        else
            {
            __HEAP_CORRUPTED_TEST((SlabHeaderSize(s->iHeader) == size), ETHeapBadCellAddress,s,SLABSIZE);
            }
        parent = &s->iChild2;
        s = s->iChild2;
        }
    }

void RHybridHeap::DoCheckSlabTrees()
    {
    for (TInt i = 0; i < (MAXSLABSIZE>>2); ++i)
        DoCheckSlabTree(&iSlabAlloc[i].iPartial, EFalse);
    DoCheckSlabTree(&iPartialPage, ETrue);
    }

void RHybridHeap::DoCheckSlab(slab* aSlab, TAllocatorType aSlabType, TAny* aBfr)
    {
    if ( (aSlabType == ESlabSpare) || (aSlabType == EEmptySlab) )
        return;

    unsigned h = aSlab->iHeader;
    __HEAP_CORRUPTED_TEST((ZEROBITS(h)), ETHeapBadCellAddress,aBfr,aSlab);
    unsigned used = SlabHeaderUsedm4(h)+4;
    unsigned size = SlabHeaderSize(h);
    __HEAP_CORRUPTED_TEST( (used < SLABSIZE),ETHeapBadCellAddress, aBfr, aSlab);
    __HEAP_CORRUPTED_TEST( ((size > 3 ) && (size < MAXSLABSIZE)), ETHeapBadCellAddress,aBfr,aSlab);
    unsigned count = 0;

    switch ( aSlabType )
        {
        case EFullSlab:
            count = (KMaxSlabPayload / size );
            __HEAP_CORRUPTED_TEST((used == count*size), ETHeapBadCellAddress,aBfr,aSlab);
            __HEAP_CORRUPTED_TEST((HeaderFloating(h)), ETHeapBadCellAddress,aBfr,aSlab);
            break;

        case EPartialFullSlab:
            __HEAP_CORRUPTED_TEST(((used % size)==0),ETHeapBadCellAddress,aBfr,aSlab);
            __HEAP_CORRUPTED_TEST(((SlabHeaderFree(h) == 0) || (((SlabHeaderFree(h)<<2)-sizeof(slabhdr)) % SlabHeaderSize(h) == 0)),
                                  ETHeapBadCellAddress,aBfr,aSlab);
            break;

        default:
            break;
        }
    }

//
// Check that committed size in heap equals number of pages in bitmap
// plus size of Doug Lea region
//
void RHybridHeap::DoCheckCommittedSize(TInt aNPages, mstate aM)
    {
    TInt total_committed = (aNPages * iPageSize) + aM->iSeg.iSize + (iBase - (TUint8*)this);
    __HEAP_CORRUPTED_TEST((total_committed == iChunkSize), ETHeapBadCellAddress,total_committed,iChunkSize);
    }
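
// Worked example of the committed-size identity (illustrative numbers): if
// the page allocator bitmap reports aNPages == 3 with iPageSize == 4096,
// the DL segment spans aM->iSeg.iSize == 0x10000 bytes, and the metadata
// between 'this' and iBase occupies 0x1000 bytes, then the heap chunk must
// be committed to exactly 3*4096 + 0x10000 + 0x1000 bytes; any discrepancy
// is reported as heap corruption.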

#endif // __KERNEL_MODE__