// heaputils.cpp
//
// Copyright (c) 2010 Accenture. All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Accenture - Initial contribution
//
#ifdef TEST_HYBRIDHEAP_ASSERTS
#define private public
#include <e32def.h>
#include "slab.h"
#include "page_alloc.h"
#include "heap_hybrid.h"
#endif

#include "heaputils.h"

#ifdef __KERNEL_MODE__

#include <kern_priv.h>
#define MEM Kern
__ASSERT_COMPILE(sizeof(LtkUtils::RKernelSideAllocatorHelper) == 10*4);
#define KERN_ENTER_CS() NKern::ThreadEnterCS()
#define KERN_LEAVE_CS() NKern::ThreadLeaveCS()
#define LOG(args...)
#define HUEXPORT_C
#else

#include <e32std.h>
#define MEM User
#define KERN_ENTER_CS()
#define KERN_LEAVE_CS()
//#include <e32debug.h>
//#define LOG(args...) RDebug::Printf(args)
#define LOG(args...)

#ifdef STANDALONE_ALLOCHELPER
#define HUEXPORT_C
#else
#define HUEXPORT_C EXPORT_C
#endif

#endif // __KERNEL_MODE__
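
// Note: the MEM and KERN_*_CS() macros above let the same implementation build
// both kernel-side (Kern::Alloc/Free inside explicit critical sections) and
// user-side (User::Alloc/Free, where the critical-section macros expand to nothing).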

using LtkUtils::RAllocatorHelper;
const TUint KPageSize = 4096;
__ASSERT_COMPILE(sizeof(RAllocatorHelper) == 9*4);

// RAllocatorHelper

HUEXPORT_C RAllocatorHelper::RAllocatorHelper()
    : iAllocatorAddress(0), iAllocatorType(EUnknown), iInfo(NULL), iValidInfo(0), iTempSlabBitmap(NULL), iPageCache(NULL), iPageCacheAddr(0)
#ifdef __KERNEL_MODE__
    , iChunk(NULL)
#endif
    {
    }

namespace LtkUtils
    {
class THeapInfo
    {
public:
    THeapInfo()
        {
        ClearStats();
        }

    void ClearStats()
        {
        memclr(this, sizeof(THeapInfo));
        }

    TInt iAllocatedSize; // number of bytes in allocated cells (excludes free cells, cell header overhead)
    TInt iCommittedSize; // amount of memory actually committed (includes cell header overhead, gaps smaller than an MMU page)
    TInt iAllocationCount; // number of allocations currently
    TInt iMaxCommittedSize; // or thereabouts
    TInt iMinCommittedSize;
    TInt iUnusedPages;
    TInt iCommittedFreeSpace;
    // Heap-only stats
    TInt iHeapFreeCellCount;
    // Hybrid-only stats
    TInt iDlaAllocsSize;
    TInt iDlaAllocsCount;
    TInt iDlaFreeSize;
    TInt iDlaFreeCount;
    TInt iSlabAllocsSize;
    TInt iSlabAllocsCount;
    TInt iPageAllocsSize;
    TInt iPageAllocsCount;
    TInt iSlabFreeCellSize;
    TInt iSlabFreeCellCount;
    TInt iSlabFreeSlabSize;
    TInt iSlabFreeSlabCount;
    };
    }

const TInt KTempBitmapSize = 256; // KMaxSlabPayload / mincellsize, technically. Close enough.

#ifdef __KERNEL_MODE__

TInt RAllocatorHelper::OpenKernelHeap()
    {
    _LIT(KName, "SvHeap");
    NKern::ThreadEnterCS();
    DObjectCon* chunkContainer = Kern::Containers()[EChunk];
    chunkContainer->Wait();
    const TInt chunkCount = chunkContainer->Count();
    DChunk* foundChunk = NULL;
    for(TInt i=0; i<chunkCount; i++)
        {
        DChunk* chunk = (DChunk*)(*chunkContainer)[i];
        if (chunk->NameBuf() && chunk->NameBuf()->Find(KName) != KErrNotFound)
            {
            // Found it. No need to open it, we can be fairly confident the kernel heap isn't going to disappear from under us
            foundChunk = chunk;
            break;
            }
        }
    iChunk = foundChunk;
    chunkContainer->Signal();
#ifdef __WINS__
    TInt err = OpenChunkHeap((TLinAddr)foundChunk->Base(), 0); // It looks like DChunk::iBase/DChunk::iFixedBase should both be ok for the kernel chunk
#else
    // Copied from P::KernelInfo
    const TRomHeader& romHdr=Epoc::RomHeader();
    const TRomEntry* primaryEntry=(const TRomEntry*)Kern::SuperPage().iPrimaryEntry;
    const TRomImageHeader* primaryImageHeader=(const TRomImageHeader*)primaryEntry->iAddressLin;
    TLinAddr stack = romHdr.iKernDataAddress + Kern::RoundToPageSize(romHdr.iTotalSvDataSize);
    TLinAddr heap = stack + Kern::RoundToPageSize(primaryImageHeader->iStackSize);
    TInt err = OpenChunkHeap(heap, 0); // aChunkMaxSize is only used for trying the middle of the chunk for hybrid allocatorness, and the kernel heap doesn't use that (thankfully). So we can safely pass in zero.

#endif
    if (!err) err = FinishConstruction();
    NKern::ThreadLeaveCS();
    return err;
    }

#else

HUEXPORT_C TInt RAllocatorHelper::Open(RAllocator* aAllocator)
    {
    iAllocatorAddress = (TLinAddr)aAllocator;
    TInt udeb = EuserIsUdeb();
    if (udeb < 0) return udeb; // error

    TInt err = IdentifyAllocatorType(udeb);
    if (!err)
        {
        err = FinishConstruction(); // Allocate everything up front
        }
    if (!err)
        {
        // We always stealth our own allocations, again to avoid tripping up allocator checks
        SetCellNestingLevel(iInfo, -1);
        SetCellNestingLevel(iTempSlabBitmap, -1);
        SetCellNestingLevel(iPageCache, -1);
        }
    return err;
    }

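/* Typical user-side usage (sketch only; error handling elided):
 *
 *     LtkUtils::RAllocatorHelper helper;
 *     if (helper.Open(&User::Allocator()) == KErrNone)
 *         {
 *         TInt allocated = helper.AllocatedSize();
 *         TInt committed = helper.CommittedSize();
 *         helper.Close();
 *         }
 */
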
#endif

TInt RAllocatorHelper::FinishConstruction()
    {
    TInt err = KErrNone;
    KERN_ENTER_CS();
    if (!iInfo)
        {
        iInfo = new THeapInfo;
        if (!iInfo) err = KErrNoMemory;
        }
    if (!err && !iTempSlabBitmap)
        {
        iTempSlabBitmap = (TUint8*)MEM::Alloc(KTempBitmapSize);
        if (!iTempSlabBitmap) err = KErrNoMemory;
        }
    if (!err && !iPageCache)
        {
        iPageCache = MEM::Alloc(KPageSize);
        if (!iPageCache) err = KErrNoMemory;
        }

    if (err)
        {
        delete iInfo;
        iInfo = NULL;
        MEM::Free(iTempSlabBitmap);
        iTempSlabBitmap = NULL;
        MEM::Free(iPageCache);
        iPageCache = NULL;
        }
    KERN_LEAVE_CS();
    return err;
    }

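// ReadWord()/ReadByte() satisfy reads from a one-page cache where possible, to
// cut down on the number of (potentially cross-process) ReadData() calls.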
TInt RAllocatorHelper::ReadWord(TLinAddr aLocation, TUint32& aResult) const
    {
    // Check if we can satisfy the read from the cache
    if (aLocation >= iPageCacheAddr)
        {
        TUint offset = aLocation - iPageCacheAddr;
        if (offset < KPageSize)
            {
            aResult = ((TUint32*)iPageCache)[offset >> 2];
            return KErrNone;
            }
        }

    // If we reach here, not in page cache. Try and read in the new page
    if (iPageCache)
        {
        TLinAddr pageAddr = aLocation & ~(KPageSize-1);
        TInt err = ReadData(pageAddr, iPageCache, KPageSize);
        if (!err)
            {
            iPageCacheAddr = pageAddr;
            aResult = ((TUint32*)iPageCache)[(aLocation - iPageCacheAddr) >> 2];
            return KErrNone;
            }
        }

    // All else fails, try just reading it uncached
    return ReadData(aLocation, &aResult, sizeof(TUint32));
    }

TInt RAllocatorHelper::ReadByte(TLinAddr aLocation, TUint8& aResult) const
    {
    // Like ReadWord but 8-bit

    // Check if we can satisfy the read from the cache
    if (aLocation >= iPageCacheAddr)
        {
        TUint offset = aLocation - iPageCacheAddr;
        if (offset < KPageSize)
            {
            aResult = ((TUint8*)iPageCache)[offset];
            return KErrNone;
            }
        }

    // If we reach here, not in page cache. Try and read in the new page
    if (iPageCache)
        {
        TLinAddr pageAddr = aLocation & ~(KPageSize-1);
        TInt err = ReadData(pageAddr, iPageCache, KPageSize);
        if (!err)
            {
            iPageCacheAddr = pageAddr;
            aResult = ((TUint8*)iPageCache)[(aLocation - iPageCacheAddr)];
            return KErrNone;
            }
        }

    // All else fails, try just reading it uncached
    return ReadData(aLocation, &aResult, sizeof(TUint8));
    }


TInt RAllocatorHelper::WriteWord(TLinAddr aLocation, TUint32 aWord)
    {
    // Invalidate the page cache if necessary
    if (aLocation >= iPageCacheAddr && aLocation - iPageCacheAddr < KPageSize)
        {
        iPageCacheAddr = 0;
        }

    return WriteData(aLocation, &aWord, sizeof(TUint32));
    }

TInt RAllocatorHelper::ReadData(TLinAddr aLocation, TAny* aResult, TInt aSize) const
    {
    // RAllocatorHelper base class impl is for allocators in same address space, so just copy it
    memcpy(aResult, (const TAny*)aLocation, aSize);
    return KErrNone;
    }

TInt RAllocatorHelper::WriteData(TLinAddr aLocation, const TAny* aData, TInt aSize)
    {
    memcpy((TAny*)aLocation, aData, aSize);
    return KErrNone;
    }

#ifdef __KERNEL_MODE__

LtkUtils::RKernelSideAllocatorHelper::RKernelSideAllocatorHelper()
    : iThread(NULL)
    {}

void LtkUtils::RKernelSideAllocatorHelper::Close()
    {
    NKern::ThreadEnterCS();
    if (iThread)
        {
        iThread->Close(NULL);
        }
    iThread = NULL;
    RAllocatorHelper::Close();
    NKern::ThreadLeaveCS();
    }

TInt LtkUtils::RKernelSideAllocatorHelper::ReadData(TLinAddr aLocation, TAny* aResult, TInt aSize) const
    {
    return Kern::ThreadRawRead(iThread, (const TAny*)aLocation, aResult, aSize);
    }

TInt LtkUtils::RKernelSideAllocatorHelper::WriteData(TLinAddr aLocation, const TAny* aData, TInt aSize)
    {
    return Kern::ThreadRawWrite(iThread, (TAny*)aLocation, aData, aSize);
    }

TInt LtkUtils::RKernelSideAllocatorHelper::TryLock()
    {
    return KErrNotSupported;
    }

void LtkUtils::RKernelSideAllocatorHelper::TryUnlock()
    {
    // Not supported
    }

TInt LtkUtils::RKernelSideAllocatorHelper::OpenUserHeap(TUint aThreadId, TLinAddr aAllocatorAddress, TBool aEuserIsUdeb)
    {
    NKern::ThreadEnterCS();
    DObjectCon* threads = Kern::Containers()[EThread];
    threads->Wait();
    iThread = Kern::ThreadFromId(aThreadId);
    if (iThread && iThread->Open() != KErrNone)
        {
        // Failed to open
        iThread = NULL;
        }
    threads->Signal();
    NKern::ThreadLeaveCS();
    if (!iThread) return KErrNotFound;
    iAllocatorAddress = aAllocatorAddress;
    TInt err = IdentifyAllocatorType(aEuserIsUdeb);
    if (err) Close();
    return err;
    }

#endif // __KERNEL_MODE__

TInt RAllocatorHelper::OpenChunkHeap(TLinAddr aChunkBase, TInt aChunkMaxSize)
    {
    iAllocatorAddress = aChunkBase;
#ifdef __KERNEL_MODE__
    // Must be in CS
    // Assumes that this only ever gets called for the kernel heap. Otherwise goes through RKernelSideAllocatorHelper::OpenUserHeap.
    TInt udeb = EFalse; // We can't figure this out until after we've got the heap
#else
    // Assumes the chunk isn't the kernel heap. It's not a good idea to try messing with the kernel heap from user side...
    TInt udeb = EuserIsUdeb();
    if (udeb < 0) return udeb; // error
#endif

    TInt err = IdentifyAllocatorType(udeb);
    if (err == KErrNone && iAllocatorType == EAllocator)
        {
        // We've no reason to assume it's an allocator, because we don't know that iAllocatorAddress actually points to an RAllocator
        err = KErrNotFound;
        }
    if (err)
        {
        TInt oldErr = err;
        TAllocatorType oldType = iAllocatorType;
        // Try middle of chunk, in case it's an RHybridHeap
        iAllocatorAddress += aChunkMaxSize / 2;
        err = IdentifyAllocatorType(udeb);
        if (err || iAllocatorType == EAllocator)
            {
            // No better than before
            iAllocatorAddress = aChunkBase;
            iAllocatorType = oldType;
            err = oldErr;
            }
        }
#ifdef __KERNEL_MODE__
    if (err == KErrNone)
        {
        // Now we know the allocator, we can figure out the udeb-ness
        RAllocator* kernelAllocator = reinterpret_cast<RAllocator*>(iAllocatorAddress);
        kernelAllocator->DebugFunction(RAllocator::ESetFail, (TAny*)9999, (TAny*)0); // Use an invalid fail reason - this should have no effect on the operation of the heap
        TInt err = kernelAllocator->DebugFunction(7, NULL, NULL); // 7 is RAllocator::TAllocDebugOp::EGetFail
        if (err == 9999)
            {
            // udeb new
            udeb = ETrue;
            }
        else if (err == KErrNotSupported)
            {
            // Old heap - fall back to slightly nasty non-thread-safe method
            kernelAllocator->DebugFunction(RAllocator::ESetFail, (TAny*)RAllocator::EFailNext, (TAny*)1);
            TAny* res = Kern::Alloc(4);
            if (res) udeb = ETrue;
            Kern::Free(res);
            }
        else
            {
            // it's new urel
            }

        // Put everything back
        kernelAllocator->DebugFunction(RAllocator::ESetFail, (TAny*)RAllocator::ENone, (TAny*)0);
        // And update the type now we know the udeb-ness for certain
        err = IdentifyAllocatorType(udeb);
        }
#endif
    return err;
    }


// The guts of RAllocatorHelper

enum TWhatToGet
    {
    ECommitted = 1,
    EAllocated = 2,
    ECount = 4,
    EMaxSize = 8,
    EUnusedPages = 16,
    ECommittedFreeSpace = 32,
    EMinSize = 64,
    EHybridStats = 128,
    };

class RHackAllocator : public RAllocator
    {
public:
    using RAllocator::iHandles;
    using RAllocator::iTotalAllocSize;
    using RAllocator::iCellCount;
    };

class RHackHeap : public RHeap
    {
public:
    // Careful, only allowed to use things that are still in the new RHeap, and are still in the same place
    using RHeap::iMaxLength;
    using RHeap::iChunkHandle;
    using RHeap::iLock;
    using RHeap::iBase;
    using RHeap::iAlign;
    using RHeap::iTop;
    };

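// These are hard-coded word offsets into the RHybridHeap, malloc_state and slab
// structures, used so the helper can read a remote heap without linking against
// the allocator's private headers. When TEST_HYBRIDHEAP_ASSERTS is defined they
// are checked against the real structures by the __ASSERT_COMPILEs below.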
const TInt KChunkSizeOffset = 30*4;
const TInt KPageMapOffset = 141*4;
//const TInt KDlOnlyOffset = 33*4;
const TInt KMallocStateOffset = 34*4;
const TInt KMallocStateTopSizeOffset = 3*4;
const TInt KMallocStateTopOffset = 5*4;
const TInt KMallocStateSegOffset = 105*4;
const TInt KUserHybridHeapSize = 186*4;
const TInt KSparePageOffset = 167*4;
const TInt KPartialPageOffset = 165*4;
const TInt KFullSlabOffset = 166*4;
const TInt KSlabAllocOffset = 172*4;
const TInt KSlabParentOffset = 1*4;
const TInt KSlabChild1Offset = 2*4;
const TInt KSlabChild2Offset = 3*4;
const TInt KSlabPayloadOffset = 4*4;
const TInt KSlabsetSize = 4;

#ifdef TEST_HYBRIDHEAP_ASSERTS
__ASSERT_COMPILE(_FOFF(RHybridHeap, iChunkSize) == KChunkSizeOffset);
__ASSERT_COMPILE(_FOFF(RHybridHeap, iPageMap) == KPageMapOffset);
__ASSERT_COMPILE(_FOFF(RHybridHeap, iGlobalMallocState) == KMallocStateOffset);
__ASSERT_COMPILE(sizeof(malloc_state) == 107*4);
__ASSERT_COMPILE(_FOFF(malloc_state, iTopSize) == KMallocStateTopSizeOffset);
__ASSERT_COMPILE(_FOFF(malloc_state, iTop) == KMallocStateTopOffset);
__ASSERT_COMPILE(_FOFF(malloc_state, iSeg) == KMallocStateSegOffset);
__ASSERT_COMPILE(sizeof(RHybridHeap) == KUserHybridHeapSize);
__ASSERT_COMPILE(_FOFF(RHybridHeap, iSparePage) == KSparePageOffset);
__ASSERT_COMPILE(_FOFF(RHybridHeap, iPartialPage) == KPartialPageOffset);
__ASSERT_COMPILE(_FOFF(RHybridHeap, iSlabAlloc) == KSlabAllocOffset);
__ASSERT_COMPILE(_FOFF(slab, iParent) == KSlabParentOffset);
__ASSERT_COMPILE(_FOFF(slab, iChild1) == KSlabChild1Offset);
__ASSERT_COMPILE(_FOFF(slab, iChild2) == KSlabChild2Offset);
__ASSERT_COMPILE(_FOFF(slab, iPayload) == KSlabPayloadOffset);
__ASSERT_COMPILE(sizeof(slabset) == KSlabsetSize);
#endif

TInt RAllocatorHelper::TryLock()
    {
#ifdef __KERNEL_MODE__
    NKern::ThreadEnterCS();
    DMutex* m = *(DMutex**)(iAllocatorAddress + _FOFF(RHackHeap, iLock));
    if (m) Kern::MutexWait(*m);
    return KErrNone;
#else
    if (iAllocatorType != EUnknown && iAllocatorType != EAllocator)
        {
        RFastLock& lock = *reinterpret_cast<RFastLock*>(iAllocatorAddress + _FOFF(RHackHeap, iLock));
        lock.Wait();
        return KErrNone;
        }
    return KErrNotSupported;
#endif
    }

void RAllocatorHelper::TryUnlock()
    {
#ifdef __KERNEL_MODE__
    DMutex* m = *(DMutex**)(iAllocatorAddress + _FOFF(RHackHeap, iLock));
    if (m) Kern::MutexSignal(*m);
    NKern::ThreadLeaveCS();
#else
    if (iAllocatorType != EUnknown && iAllocatorType != EAllocator)
        {
        RFastLock& lock = *reinterpret_cast<RFastLock*>(iAllocatorAddress + _FOFF(RHackHeap, iLock));
        lock.Signal();
        }
#endif
    }

HUEXPORT_C void RAllocatorHelper::Close()
    {
    KERN_ENTER_CS();
    iAllocatorType = EUnknown;
    iAllocatorAddress = 0;
    delete iInfo;
    iInfo = NULL;
    iValidInfo = 0;
    MEM::Free(iTempSlabBitmap);
    iTempSlabBitmap = NULL;
    MEM::Free(iPageCache);
    iPageCache = NULL;
    iPageCacheAddr = 0;
    KERN_LEAVE_CS();
    }

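// Heuristic type identification: every RHeap variant points iHandles at its own
// iChunkHandle/iLock members (other RAllocator subclasses are unlikely to), and
// the old RHeap and the newer RHybridHeap can be told apart by how far iBase
// sits beyond the start of the object.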
TInt RAllocatorHelper::IdentifyAllocatorType(TBool aAllocatorIsUdeb)
    {
    iAllocatorType = EUnknown;

    TUint32 handlesPtr = 0;
    TInt err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iHandles), handlesPtr);

    if (err) return err;
    if (handlesPtr == iAllocatorAddress + _FOFF(RHackHeap, iChunkHandle) || handlesPtr == iAllocatorAddress + _FOFF(RHackHeap, iLock))
        {
        // It's an RHeap of some kind - I doubt any other RAllocator subclass will use iHandles in this way
        TUint32 base = 0;
        err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), base);
        if (err) return err;
        TInt objsize = (TInt)base - (TInt)iAllocatorAddress;
        if (objsize <= 32*4)
            {
            // Old RHeap
            iAllocatorType = aAllocatorIsUdeb ? EUdebOldRHeap : EUrelOldRHeap;
            }
        else
            {
            // new hybrid heap - bigger than the old one. Likewise figure out if udeb or urel.
            iAllocatorType = aAllocatorIsUdeb ? EUdebHybridHeap : EUrelHybridHeap;
            }
        }
    else
        {
        iAllocatorType = EAllocator;
        }
    return KErrNone;
    }

HUEXPORT_C TInt RAllocatorHelper::SetCellNestingLevel(TAny* aCell, TInt aNestingLevel)
    {
    TInt err = KErrNone;

    switch (iAllocatorType)
        {
        case EUdebOldRHeap:
        case EUdebHybridHeap:
            // By this reckoning, they're in the same place amazingly
            {
            TLinAddr nestingAddr = (TLinAddr)aCell - 8;
            err = WriteWord(nestingAddr, aNestingLevel);
            break;
            }
        default:
            break;
        }
    return err;
    }

HUEXPORT_C TInt RAllocatorHelper::GetCellNestingLevel(TAny* aCell, TInt& aNestingLevel)
    {
    switch (iAllocatorType)
        {
        case EUdebOldRHeap:
        case EUdebHybridHeap:
            // By this reckoning, they're in the same place amazingly
            {
            TLinAddr nestingAddr = (TLinAddr)aCell - 8;
            return ReadWord(nestingAddr, (TUint32&)aNestingLevel);
            }
        default:
            return KErrNotSupported;
        }
    }

TInt RAllocatorHelper::RefreshDetails(TUint aMask)
    {
    TInt err = FinishConstruction();
    if (err) return err;

    // Invalidate the page cache
    iPageCacheAddr = 0;

    TryLock();
    err = DoRefreshDetails(aMask);
    TryUnlock();
    return err;
    }

const TInt KHeapWalkStatsForOldHeap = (EUnusedPages|ECommittedFreeSpace);
const TInt KHeapWalkStatsForNewHeap = (EAllocated|ECount|EUnusedPages|ECommittedFreeSpace|EHybridStats);

TInt RAllocatorHelper::DoRefreshDetails(TUint aMask)
    {
    TInt err = KErrNotSupported;
    switch (iAllocatorType)
        {
        case EUrelOldRHeap:
        case EUdebOldRHeap:
            {
            if (aMask & ECommitted)
                {
                // The old RHeap::Size() used to use iTop - iBase, which was effectively chunkSize - sizeof(RHeap)
                // I think that for CommittedSize we should include the size of the heap object, just as it includes
                // the size of heap cell metadata and overhead. Plus it makes sure the committedsize is a multiple of the page size
                TUint32 top = 0;
                //TUint32 base = 0;
                //err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), base);
                //if (err) return err;
                err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iTop), top);
                if (err) return err;

                //iInfo->iCommittedSize = top - base;
                iInfo->iCommittedSize = top - iAllocatorAddress;
                iValidInfo |= ECommitted;
                }
            if (aMask & EAllocated)
                {
                TUint32 allocSize = 0;
                err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iTotalAllocSize), allocSize);
                if (err) return err;
                iInfo->iAllocatedSize = allocSize;
                iValidInfo |= EAllocated;
                }
            if (aMask & ECount)
                {
                TUint32 count = 0;
                err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iCellCount), count);
                if (err) return err;
                iInfo->iAllocationCount = count;
                iValidInfo |= ECount;
                }
            if (aMask & EMaxSize)
                {
                TUint32 maxlen = 0;
                err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iMaxLength), maxlen);
                if (err) return err;
                iInfo->iMaxCommittedSize = maxlen;
                iValidInfo |= EMaxSize;
                }
            if (aMask & EMinSize)
                {
                TUint32 minlen = 0;
                err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iMaxLength) - 4, minlen); // This isn't a typo! iMinLength is 4 bytes before iMaxLength, on old heap ONLY
                if (err) return err;
                iInfo->iMinCommittedSize = minlen;
                iValidInfo |= EMinSize;
                }
            if (aMask & KHeapWalkStatsForOldHeap)
                {
                // Need a heap walk
                iInfo->ClearStats();
                iValidInfo = 0;
                err = DoWalk(&WalkForStats, NULL);
                if (err == KErrNone) iValidInfo |= KHeapWalkStatsForOldHeap;
                }
            return err;
            }
        case EUrelHybridHeap:
        case EUdebHybridHeap:
            {
            TBool needWalk = EFalse;
            if (aMask & ECommitted)
                {
                // RAllocator::Size uses iChunkSize - sizeof(RHybridHeap);
                // We can't do exactly the same, because we can't calculate sizeof(RHybridHeap), only ROUND_UP(sizeof(RHybridHeap), iAlign)
                // And in fact we don't bother, and just use iChunkSize
                TUint32 chunkSize = 0;
                err = ReadWord(iAllocatorAddress + KChunkSizeOffset, chunkSize);
                if (err) return err;
                //TUint32 baseAddr = 0;
                //err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), baseAddr);
                //if (err) return err;
                iInfo->iCommittedSize = chunkSize; // - (baseAddr - iAllocatorAddress);
                iValidInfo |= ECommitted;
                }
            if (aMask & (EAllocated|ECount))
                {
                if (iAllocatorType == EUdebHybridHeap)
                    {
                    // Easy, just get them from the counter
                    TUint32 totalAlloc = 0;
                    err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iTotalAllocSize), totalAlloc);
                    if (err) return err;
                    iInfo->iAllocatedSize = totalAlloc;
                    iValidInfo |= EAllocated;

                    TUint32 cellCount = 0;
                    err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iCellCount), cellCount);
                    if (err) return err;
                    iInfo->iAllocationCount = cellCount;
                    iValidInfo |= ECount;
                    }
                else
                    {
                    // A heap walk is needed
                    needWalk = ETrue;
                    }
                }
            if (aMask & EMaxSize)
                {
                TUint32 maxlen = 0;
                err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iMaxLength), maxlen);
                if (err) return err;
                iInfo->iMaxCommittedSize = maxlen;
                iValidInfo |= EMaxSize;
                }
            if (aMask & EMinSize)
                {
                TUint32 minlen = 0;
                err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iAlign) + 4*4, minlen); // iMinLength is in different place to old RHeap
                if (err) return err;
                iInfo->iMinCommittedSize = minlen;
                iValidInfo |= EMinSize;
                }
            if (aMask & (EUnusedPages|ECommittedFreeSpace|EHybridStats))
                {
                // EAllocated and ECount have already been taken care of above
                needWalk = ETrue;
                }

            if (needWalk)
                {
                iInfo->ClearStats();
                iValidInfo = 0;
                err = DoWalk(&WalkForStats, NULL);
                if (err == KErrNone) iValidInfo |= KHeapWalkStatsForNewHeap;
                }
            return err;
            }
        default:
            return KErrNotSupported;
        }
    }

TInt RAllocatorHelper::CheckValid(TUint aMask)
    {
    if ((iValidInfo & aMask) == aMask)
        {
        return KErrNone;
        }
    else
        {
        return RefreshDetails(aMask);
        }
    }

HUEXPORT_C TInt RAllocatorHelper::CommittedSize()
    {
    TInt err = CheckValid(ECommitted);
    if (err) return err;
    return iInfo->iCommittedSize;
    }

HUEXPORT_C TInt RAllocatorHelper::AllocatedSize()
    {
    TInt err = CheckValid(EAllocated);
    if (err) return err;
    return iInfo->iAllocatedSize;
    }

HUEXPORT_C TInt RAllocatorHelper::AllocationCount()
    {
    TInt err = CheckValid(ECount);
    if (err) return err;
    return iInfo->iAllocationCount;
    }

HUEXPORT_C TInt RAllocatorHelper::RefreshDetails()
    {
    return RefreshDetails(iValidInfo);
    }

HUEXPORT_C TInt RAllocatorHelper::MaxCommittedSize()
    {
    TInt err = CheckValid(EMaxSize);
    if (err) return err;
    return iInfo->iMaxCommittedSize;
    }

HUEXPORT_C TInt RAllocatorHelper::MinCommittedSize()
    {
    TInt err = CheckValid(EMinSize);
    if (err) return err;
    return iInfo->iMinCommittedSize;
    }

HUEXPORT_C TInt RAllocatorHelper::AllocCountForCell(TAny* aCell) const
    {
    TUint32 allocCount = 0;
    switch (iAllocatorType)
        {
        case EUdebOldRHeap:
        case EUdebHybridHeap: // Both are in the same place, amazingly
            {
            TLinAddr allocCountAddr = (TLinAddr)aCell - 4;
            TInt err = ReadWord(allocCountAddr, allocCount);
            if (err) return err;
            return (TInt)allocCount;
            }
        default:
            return KErrNotSupported;
        }
    }

struct SContext3
    {
    RAllocatorHelper::TWalkFunc3 iOrigWalkFn;
    TAny* iOrigContext;
    };

TBool RAllocatorHelper::DispatchClientWalkCallback(RAllocatorHelper& aHelper, TAny* aContext, RAllocatorHelper::TExtendedCellType aCellType, TLinAddr aCellPtr, TInt aCellLength)
    {
    WalkForStats(aHelper, NULL, aCellType, aCellPtr, aCellLength);
    SContext3* context = static_cast<SContext3*>(aContext);
    return (*context->iOrigWalkFn)(aHelper, context->iOrigContext, aCellType, aCellPtr, aCellLength);
    }

HUEXPORT_C TInt RAllocatorHelper::Walk(TWalkFunc3 aCallbackFn, TAny* aContext)
    {
    // Might as well take the opportunity of updating our stats at the same time as walking the heap for the client
    SContext3 context = { aCallbackFn, aContext };

    TInt err = FinishConstruction(); // In case this hasn't been done yet
    if (err) return err;

    TryLock();
    err = DoWalk(&DispatchClientWalkCallback, &context);
    TryUnlock();
    return err;
    }

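// DoWalk simply dispatches to whichever walker matches the detected heap type.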
TInt RAllocatorHelper::DoWalk(TWalkFunc3 aCallbackFn, TAny* aContext)
    {
    TInt err = KErrNotSupported;
    switch (iAllocatorType)
        {
        case EUdebOldRHeap:
        case EUrelOldRHeap:
            err = OldSkoolWalk(aCallbackFn, aContext);
            break;
        case EUrelHybridHeap:
        case EUdebHybridHeap:
            err = NewHotnessWalk(aCallbackFn, aContext);
            break;
        default:
            err = KErrNotSupported;
            break;
        }
    return err;
    }

struct SContext
    {
    RAllocatorHelper::TWalkFunc iOrigWalkFn;
    TAny* iOrigContext;
    };

struct SContext2
    {
    RAllocatorHelper::TWalkFunc2 iOrigWalkFn;
    TAny* iOrigContext;
    };

#define New2Old(aNew) (((aNew)&RAllocatorHelper::EAllocationMask) ? RAllocatorHelper::EAllocation : ((aNew)&RAllocatorHelper::EFreeMask) ? RAllocatorHelper::EFreeSpace : RAllocatorHelper::EBadness)

TBool DispatchOldTWalkFuncCallback(RAllocatorHelper& /*aHelper*/, TAny* aContext, RAllocatorHelper::TExtendedCellType aCellType, TLinAddr aCellPtr, TInt aCellLength)
    {
    SContext* context = static_cast<SContext*>(aContext);
    return (*context->iOrigWalkFn)(context->iOrigContext, New2Old(aCellType), aCellPtr, aCellLength);
    }

TBool DispatchOldTWalk2FuncCallback(RAllocatorHelper& aHelper, TAny* aContext, RAllocatorHelper::TExtendedCellType aCellType, TLinAddr aCellPtr, TInt aCellLength)
    {
    SContext2* context = static_cast<SContext2*>(aContext);
    return (*context->iOrigWalkFn)(aHelper, context->iOrigContext, New2Old(aCellType), aCellPtr, aCellLength);
    }

HUEXPORT_C TInt RAllocatorHelper::Walk(TWalkFunc aCallbackFn, TAny* aContext)
    {
    // For backwards compatibility, insert a compatibility callback to map between the different types of callback that clients requested
    SContext context = { aCallbackFn, aContext };
    return Walk(&DispatchOldTWalkFuncCallback, &context);
    }

HUEXPORT_C TInt RAllocatorHelper::Walk(TWalkFunc2 aCallbackFn, TAny* aContext)
    {
    SContext2 context = { aCallbackFn, aContext };
    return Walk(&DispatchOldTWalk2FuncCallback, &context);
    }

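// The walker for the pre-hybrid RHeap: cells are contiguous, so we walk the
// allocated cells from iBase up to each entry on the free list in turn,
// sanity-checking cell lengths and the free-list ordering as we go.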
TInt RAllocatorHelper::OldSkoolWalk(TWalkFunc3 aCallbackFn, TAny* aContext)
    {
    TLinAddr pC = 0;
    TInt err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), pC); // pC = iBase; // allocated cells
    if (err) return err;
    TLinAddr pF = iAllocatorAddress + _FOFF(RHackHeap, iAlign) + 3*4; // pF = &iFree; // free cells

    TLinAddr top = 0;
    err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iTop), top);
    if (err) return err;
    const TInt KAllocatedCellHeaderSize = iAllocatorType == EUdebOldRHeap ? 12 : 4;
    TInt minCell = 0;
    err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iAlign) + 4, (TUint32&)minCell);
    if (err) return err;
    TInt align = 0;
    err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iAlign), (TUint32&)align);
    if (err) return err;

    FOREVER
        {
        err = ReadWord(pF+4, pF); // pF = pF->next; // next free cell
        if (err) return err;
        TLinAddr pFnext = 0;
        if (pF) err = ReadWord(pF + 4, pFnext);
        if (err) return err;

        if (!pF)
            {
            pF = top; // to make size checking work
            }
        else if (pF>=top || (pFnext && pFnext<=pF) )
            {
            // free cell pointer off the end or going backwards
            //Unlock();
            (*aCallbackFn)(*this, aContext, EHeapBadFreeCellAddress, pF, 0);
            return KErrCorrupt;
            }
        else
            {
            TInt l; // = pF->len
            err = ReadWord(pF, (TUint32&)l);
            if (err) return err;
            if (l<minCell || (l & (align-1)))
                {
                // free cell length invalid
                //Unlock();
                (*aCallbackFn)(*this, aContext, EHeapBadFreeCellSize, pF, l);
                return KErrCorrupt;
                }
            }
        while (pC!=pF) // walk allocated cells up to next free cell
            {
            TInt l; // pC->len;
            err = ReadWord(pC, (TUint32&)l);
            if (err) return err;
            if (l<minCell || (l & (align-1)))
                {
                // allocated cell length invalid
                //Unlock();
                (*aCallbackFn)(*this, aContext, EHeapBadAllocatedCellSize, pC, l);
                return KErrCorrupt;
                }
            TBool shouldContinue = (*aCallbackFn)(*this, aContext, EHeapAllocation, pC + KAllocatedCellHeaderSize, l - KAllocatedCellHeaderSize);
            if (!shouldContinue) return KErrNone;

            //SCell* pN = __NEXT_CELL(pC);
            TLinAddr pN = pC + l;
            if (pN > pF)
                {
                // cell overlaps next free cell
                //Unlock();
                (*aCallbackFn)(*this, aContext, EHeapBadAllocatedCellAddress, pC, l);
                return KErrCorrupt;
                }
            pC = pN;
            }
        if (pF == top)
            break; // reached end of heap
        TInt pFlen = 0;
        err = ReadWord(pF, (TUint32&)pFlen);
        if (err) return err;
        pC = pF + pFlen; // pC = __NEXT_CELL(pF); // step to next allocated cell
        TBool shouldContinue = (*aCallbackFn)(*this, aContext, EHeapFreeCell, pF, pFlen);
        if (!shouldContinue) return KErrNone;
        }
    return KErrNone;
    }

HUEXPORT_C TInt RAllocatorHelper::CountUnusedPages()
    {
    TInt err = CheckValid(EUnusedPages);
    if (err) return err;
    return iInfo->iUnusedPages;
    }

HUEXPORT_C TInt RAllocatorHelper::CommittedFreeSpace()
    {
    TInt err = CheckValid(ECommittedFreeSpace);
    if (err) return err;
    return iInfo->iCommittedFreeSpace;
    }

#define ROUND_DOWN(val, pow2) ((val) & ~((pow2)-1))
#define ROUND_UP(val, pow2) ROUND_DOWN((val) + (pow2) - 1, (pow2))

HUEXPORT_C TLinAddr RAllocatorHelper::AllocatorAddress() const
    {
    return iAllocatorAddress;
    }

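// Stats-gathering callback used by DoRefreshDetails() (and piggy-backed onto
// client walks via DispatchClientWalkCallback): accumulates per-cell-type sizes
// and counts into iInfo.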
TBool RAllocatorHelper::WalkForStats(RAllocatorHelper& aSelf, TAny* /*aContext*/, TExtendedCellType aType, TLinAddr aCellPtr, TInt aCellLength)
    {
    //ASSERT(aCellLength >= 0);
    THeapInfo& info = *aSelf.iInfo;

    TInt pagesSpanned = 0; // The number of pages that fit entirely inside the payload of this cell
    if ((TUint)aCellLength > KPageSize)
        {
        TLinAddr nextPageAlignedAddr = ROUND_UP(aCellPtr, KPageSize);
        pagesSpanned = ROUND_DOWN(aCellPtr + aCellLength - nextPageAlignedAddr, KPageSize) / KPageSize;
        }

    if (aSelf.iAllocatorType == EUrelOldRHeap || aSelf.iAllocatorType == EUdebOldRHeap)
        {
        if (aType & EFreeMask)
            {
            info.iUnusedPages += pagesSpanned;
            info.iCommittedFreeSpace += aCellLength;
            info.iHeapFreeCellCount++;
            }
        }
    else
        {
        if (aType & EAllocationMask)
            {
            info.iAllocatedSize += aCellLength;
            info.iAllocationCount++;
            }
        else if (aType & EFreeMask)
            {
            // I *think* that DLA will decommit pages from inside free cells...
            TInt committedLen = aCellLength - (pagesSpanned * KPageSize);
            info.iCommittedFreeSpace += committedLen;
            }

        switch (aType)
            {
            case EDlaAllocation:
                info.iDlaAllocsSize += aCellLength;
                info.iDlaAllocsCount++;
                break;
            case EPageAllocation:
                info.iPageAllocsSize += aCellLength;
                info.iPageAllocsCount++;
                break;
            case ESlabAllocation:
                info.iSlabAllocsSize += aCellLength;
                info.iSlabAllocsCount++;
                break;
            case EDlaFreeCell:
                info.iDlaFreeSize += aCellLength;
                info.iDlaFreeCount++;
                break;
            case ESlabFreeCell:
                info.iSlabFreeCellSize += aCellLength;
                info.iSlabFreeCellCount++;
                break;
            case ESlabFreeSlab:
                info.iSlabFreeSlabSize += aCellLength;
                info.iSlabFreeSlabCount++;
                break;
            default:
                break;
            }
        }

    return ETrue;
    }

#define PAGESHIFT 12

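// The following helpers reimplement the hybrid heap's page-map bit accessors
// (the commented-out lines show the in-process originals) on top of remote
// ReadWord()/ReadByte() calls against the target heap's page bitmap.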
TUint RAllocatorHelper::PageMapOperatorBrackets(unsigned ix, TInt& err) const
    {
    //return 1U&(iBase[ix>>3] >> (ix&7));
    TUint32 basePtr = 0;
    err = ReadWord(iAllocatorAddress + KPageMapOffset, basePtr);
    if (err) return 0;

    TUint8 res = 0;
    err = ReadByte(basePtr + (ix >> 3), res);
    if (err) return 0;

    return 1U&(res >> (ix&7));
    }


TInt RAllocatorHelper::PageMapFind(TUint start, TUint bit, TInt& err)
    {
    TUint32 iNbits = 0;
    err = ReadWord(iAllocatorAddress + KPageMapOffset + 4, iNbits);
    if (err) return 0;

    if (start<iNbits) do
        {
        //if ((*this)[start]==bit)
        if (PageMapOperatorBrackets(start, err) == bit || err)
            return start;
        } while (++start<iNbits);
    return -1;
    }

TUint RAllocatorHelper::PagedDecode(TUint pos, TInt& err)
    {
    unsigned bits = PageMapBits(pos,2,err);
    if (err) return 0;
    bits >>= 1;
    if (bits == 0)
        return 1;
    bits = PageMapBits(pos+2,2,err);
    if (err) return 0;
    if ((bits & 1) == 0)
        return 2 + (bits>>1);
    else if ((bits>>1) == 0)
        {
        return PageMapBits(pos+4, 4,err);
        }
    else
        {
        return PageMapBits(pos+4, 18,err);
        }
    }

TUint RAllocatorHelper::PageMapBits(unsigned ix, unsigned len, TInt& err)
    {
    int l=len;
    unsigned val=0;
    unsigned bit=0;
    while (--l>=0)
        {
        //val |= (*this)[ix++]<<bit++;
        val |= PageMapOperatorBrackets(ix++, err) << bit++;
        if (err) return 0;
        }
    return val;
    }

enum TSlabType { ESlabFullInfo, ESlabPartialInfo, ESlabEmptyInfo };

#ifndef TEST_HYBRIDHEAP_ASSERTS
#define MAXSLABSIZE 56
#define SLABSHIFT 10
#define SLABSIZE (1 << SLABSHIFT)
const TInt KMaxSlabPayload = SLABSIZE - KSlabPayloadOffset;
#endif

TInt RAllocatorHelper::NewHotnessWalk(TWalkFunc3 aCallbackFn, TAny* aContext)
    {
    // RHybridHeap does paged, slab then DLA, so that's what we do too
    // Remember Kernel RHybridHeaps don't even have the page and slab members

    TUint32 basePtr;
    TInt err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), basePtr);
    if (err) return err;
    if (basePtr < iAllocatorAddress + KUserHybridHeapSize)
        {
        // Must be a kernel one - don't do page and slab
        }
    else
        {
        // Paged
        TUint32 membase = 0;
        err = ReadWord(iAllocatorAddress + KPageMapOffset + 8, membase);
        if (err) return err;

        TBool shouldContinue = ETrue;
        for (int ix = 0;(ix = PageMapFind(ix,1,err)) >= 0 && err == KErrNone;)
            {
            int npage = PagedDecode(ix, err);
            if (err) return err;
            // Introduce paged buffer to the walk function
            TLinAddr bfr = membase + (1 << (PAGESHIFT-1))*ix;
            int len = npage << PAGESHIFT;
            if ( (TUint)len > KPageSize )
                { // Buffers of one page or smaller are slab pages mapped into the bitmap, so only report buffers larger than a page here
                if (iAllocatorType == EUdebHybridHeap)
                    {
                    bfr += 8;
                    len -= 8;
                    }
                shouldContinue = (*aCallbackFn)(*this, aContext, EPageAllocation, bfr, len);
                if (!shouldContinue) return KErrNone;
                }
            ix += (npage<<1);
            }
        if (err) return err;

        // Slab
        TUint32 sparePage = 0;
        err = ReadWord(iAllocatorAddress + KSparePageOffset, sparePage);
        if (err) return err;
        if (sparePage)
            {
            //Walk(wi, iSparePage, iPageSize, EGoodFreeCell, ESlabSpare); // Introduce Slab spare page to the walk function
            // This counts as 4 spare slabs
            for (TInt i = 0; i < 4; i++)
                {
                shouldContinue = (*aCallbackFn)(*this, aContext, ESlabFreeSlab, sparePage + SLABSIZE*i, SLABSIZE);
                if (!shouldContinue) return KErrNone;
                }
            }

        //TreeWalk(&iFullSlab, &SlabFullInfo, i, wi);
        TInt err = TreeWalk(iAllocatorAddress + KFullSlabOffset, ESlabFullInfo, aCallbackFn, aContext, shouldContinue);
        if (err || !shouldContinue) return err;
        for (int ix = 0; ix < (MAXSLABSIZE>>2); ++ix)
            {
            TUint32 partialAddr = iAllocatorAddress + KSlabAllocOffset + ix*KSlabsetSize;
            //TreeWalk(&iSlabAlloc[ix].iPartial, &SlabPartialInfo, i, wi);
            err = TreeWalk(partialAddr, ESlabPartialInfo, aCallbackFn, aContext, shouldContinue);
            if (err || !shouldContinue) return err;
            }
        //TreeWalk(&iPartialPage, &SlabEmptyInfo, i, wi);
        TreeWalk(iAllocatorAddress + KPartialPageOffset, ESlabEmptyInfo, aCallbackFn, aContext, shouldContinue);
        }

    // DLA
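    // These macros appear to mirror the dlmalloc-style chunk layout used by the
    // hybrid heap's malloc_state: an 8-byte header before the user memory, and
    // the low bits of the size word holding the in-use flags.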
#define CHUNK_OVERHEAD (sizeof(TUint))
#define CHUNK_ALIGN_MASK (7)
#define CHUNK2MEM(p) ((TLinAddr)(p) + 8)
#define MEM2CHUNK(mem) ((TLinAddr)(p) - 8)
/* chunk associated with aligned address A */
#define ALIGN_OFFSET(A)\
    ((((TLinAddr)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
    ((8 - ((TLinAddr)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
#define ALIGN_AS_CHUNK(A) ((A) + ALIGN_OFFSET(CHUNK2MEM(A)))
#define CINUSE_BIT 2
#define INUSE_BITS 3

    TUint32 topSize = 0;
    err = ReadWord(iAllocatorAddress + KMallocStateOffset + KMallocStateTopSizeOffset, topSize);
    if (err) return err;

    TUint32 top = 0;
    err = ReadWord(iAllocatorAddress + KMallocStateOffset + KMallocStateTopOffset, top);
    if (err) return err;

    TInt max = ((topSize-1) & ~CHUNK_ALIGN_MASK) - CHUNK_OVERHEAD;
    if ( max < 0 )
        max = 0;

    TBool shouldContinue = (*aCallbackFn)(*this, aContext, EDlaFreeCell, top, max);
    if (!shouldContinue) return KErrNone;

    TUint32 mallocStateSegBase = 0;
    err = ReadWord(iAllocatorAddress + KMallocStateOffset + KMallocStateSegOffset, mallocStateSegBase);
    if (err) return err;

    for (TLinAddr q = ALIGN_AS_CHUNK(mallocStateSegBase); q != top; /*q = NEXT_CHUNK(q)*/)
        {
        TUint32 qhead = 0;
        err = ReadWord(q + 4, qhead);
        if (err) return err;
        //TInt sz = CHUNKSIZE(q);
        TInt sz = qhead & ~(INUSE_BITS);
        if (!(qhead & CINUSE_BIT))
            {
            //Walk(wi, CHUNK2MEM(q), sz, EGoodFreeCell, EDougLeaAllocator); // Introduce DL free buffer to the walk function
            shouldContinue = (*aCallbackFn)(*this, aContext, EDlaFreeCell, CHUNK2MEM(q), sz);
            if (!shouldContinue) return KErrNone;
            }
        else
            {
            //Walk(wi, CHUNK2MEM(q), (sz- CHUNK_OVERHEAD), EGoodAllocatedCell, EDougLeaAllocator); // Introduce DL allocated buffer to the walk function
            TLinAddr addr = CHUNK2MEM(q);
            TInt size = sz - CHUNK_OVERHEAD;
            if (iAllocatorType == EUdebHybridHeap)
                {
                size -= 8;
                addr += 8;
                }
            shouldContinue = (*aCallbackFn)(*this, aContext, EDlaAllocation, addr, size);
            if (!shouldContinue) return KErrNone;
            }
        // This is q = NEXT_CHUNK(q) expanded
        q = q + sz;
        }
    return KErrNone;
    }

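// Iterative (non-recursive) traversal of a slab tree: walk down the left
// children to the leftmost slab, report each slab's cells according to
// aSlabType, then step through right children, using the iParent links to
// climb back up. All pointer chasing goes through ReadWord() so it works on a
// remote heap too.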
TInt RAllocatorHelper::TreeWalk(TUint32 aSlabRoot, TInt aSlabType, TWalkFunc3 aCallbackFn, TAny* aContext, TBool& shouldContinue)
    {
    const TSlabType type = (TSlabType)aSlabType;

    TUint32 s = 0;
    TInt err = ReadWord(aSlabRoot, s);
    if (err) return err;
    //slab* s = *root;
    if (!s)
        return KErrNone;

    for (;;)
        {
        //slab* c;
        //while ((c = s->iChild1) != 0)
        //    s = c; // walk down left side to end
        TUint32 c;
        for (;;)
            {
            err = ReadWord(s + KSlabChild1Offset, c);
            if (err) return err;
            if (c == 0) break;
            else s = c;
            }
        for (;;)
            {
            //TODOf(s, i, wi);
            //TODO __HEAP_CORRUPTED_TEST_STATIC
            TUint32 h;
            err = ReadWord(s, h); // = aSlab->iHeader;
            if (err) return err;
            TUint32 size = (h&0x0003f000)>>12; //SlabHeaderSize(h);
            TUint debugheadersize = 0;
            if (iAllocatorType == EUdebHybridHeap) debugheadersize = 8;
            TUint32 usedCount = (((h&0x0ffc0000)>>18) + 4) / size; // (SlabHeaderUsedm4(h) + 4) / size;
            switch (type)
                {
                case ESlabFullInfo:
                    {
                    TUint32 count = usedCount;
                    TUint32 i = 0;
                    while ( i < count )
                        {
                        TUint32 addr = s + KSlabPayloadOffset + i*size; //&aSlab->iPayload[i*size];
                        shouldContinue = (*aCallbackFn)(*this, aContext, ESlabAllocation, addr + debugheadersize, size - debugheadersize);
                        if (!shouldContinue) return KErrNone;
                        i++;
                        }
                    break;
                    }
                case ESlabPartialInfo:
                    {
                    //TODO __HEAP_CORRUPTED_TEST_STATIC
                    TUint32 count = KMaxSlabPayload / size;
                    TUint32 freeOffset = (h & 0xff) << 2;
                    if (freeOffset == 0)
                        {
                        // TODO Shouldn't happen for a slab on the partial list
                        }
                    memset(iTempSlabBitmap, 1, KTempBitmapSize); // Everything defaults to in use
                    TUint wildernessCount = count - usedCount;
                    while (freeOffset)
                        {
                        wildernessCount--;
                        TInt idx = (freeOffset-KSlabPayloadOffset)/size;
                        LOG("iTempSlabBitmap freeOffset %d index %d", freeOffset, idx);
                        iTempSlabBitmap[idx] = 0; // Mark it as free

                        TUint32 addr = s + freeOffset;
                        TUint8 nextCell = 0;
                        err = ReadByte(addr, nextCell);
                        if (err) return err;
                        freeOffset = ((TUint32)nextCell) << 2;
                        }
                    memset(iTempSlabBitmap + count - wildernessCount, 0, wildernessCount); // Mark the wilderness as free
                    for (TInt i = 0; i < count; i++)
                        {
                        TLinAddr addr = s + KSlabPayloadOffset + i*size;
                        if (iTempSlabBitmap[i])
                            {
                            // In use
                            shouldContinue = (*aCallbackFn)(*this, aContext, ESlabAllocation, addr + debugheadersize, size - debugheadersize);
                            }
                        else
                            {
                            // Free
                            shouldContinue = (*aCallbackFn)(*this, aContext, ESlabFreeCell, addr, size);
                            }
                        if (!shouldContinue) return KErrNone;
                        }
                    break;
                    }
                case ESlabEmptyInfo:
                    {
                    // Check which slabs of this page are empty
                    TUint32 pageAddr = ROUND_DOWN(s, KPageSize);
                    TUint32 headerForPage = 0;
                    err = ReadWord(pageAddr, headerForPage);
                    if (err) return err;
                    TUint32 slabHeaderPageMap = (headerForPage & 0x00000f00)>>8; // SlabHeaderPagemap(unsigned h)
                    for (TInt slabIdx = 0; slabIdx < 4; slabIdx++)
                        {
                        if (slabHeaderPageMap & (1<<slabIdx))
                            {
                            TUint32 addr = pageAddr + SLABSIZE*slabIdx + KSlabPayloadOffset; //&aSlab->iPayload[i*size];
                            shouldContinue = (*aCallbackFn)(*this, aContext, ESlabFreeSlab, addr, KMaxSlabPayload);
                            if (!shouldContinue) return KErrNone;
                            }
                        }
                    break;
                    }
                }

            //c = s->iChild2;
            err = ReadWord(s + KSlabChild2Offset, c);
            if (err) return err;

            if (c)
                { // one step down right side, now try and walk down left
                s = c;
                break;
                }
            for (;;)
                { // loop to walk up right side
                TUint32 pp = 0;
                err = ReadWord(s + KSlabParentOffset, pp);
                if (err) return err;
                //slab** pp = s->iParent;
                if (pp == aSlabRoot)
                    return KErrNone;
#define SlabFor(x) ROUND_DOWN(x, SLABSIZE)
                s = SlabFor(pp);
                //if (pp == &s->iChild1)
                if (pp == s + KSlabChild1Offset)
                    break;
                }
            }
        }
    }

HUEXPORT_C TInt RAllocatorHelper::SizeForCellType(TExtendedCellType aType)
    {
    if (aType & EBadnessMask) return KErrArgument;
    if (aType == EAllocationMask) return AllocatedSize();

    if (iAllocatorType == EUdebOldRHeap || iAllocatorType == EUrelOldRHeap)
        {
        switch (aType)
            {
            case EHeapAllocation:
                return AllocatedSize();
            case EHeapFreeCell:
            case EFreeMask:
                return CommittedFreeSpace();
            default:
                return KErrNotSupported;
            }
        }
    else if (iAllocatorType == EUrelHybridHeap || iAllocatorType == EUdebHybridHeap)
        {
        TInt err = CheckValid(EHybridStats);
        if (err) return err;

        switch (aType)
            {
            case EHeapAllocation:
            case EHeapFreeCell:
                return KErrNotSupported;
            case EDlaAllocation:
                return iInfo->iDlaAllocsSize;
            case EPageAllocation:
                return iInfo->iPageAllocsSize;
            case ESlabAllocation:
                return iInfo->iSlabAllocsSize;
            case EDlaFreeCell:
                return iInfo->iDlaFreeSize;
            case ESlabFreeCell:
                return iInfo->iSlabFreeCellSize;
            case ESlabFreeSlab:
                return iInfo->iSlabFreeSlabSize;
            case EFreeMask:
                // Note this isn't the same as asking for CommittedFreeSpace(). SizeForCellType(EFreeMask) may include decommitted pages that lie inside a free cell
                return iInfo->iDlaFreeSize + iInfo->iSlabFreeCellSize + iInfo->iSlabFreeSlabSize;
            default:
                return KErrNotSupported;
            }
        }
    else
        {
        return KErrNotSupported;
        }
    }

HUEXPORT_C TInt RAllocatorHelper::CountForCellType(TExtendedCellType aType)
    {
    if (aType & EBadnessMask) return KErrArgument;
    if (aType == EAllocationMask) return AllocationCount();

    if (iAllocatorType == EUdebOldRHeap || iAllocatorType == EUrelOldRHeap)
        {
        switch (aType)
            {
            case EHeapAllocation:
                return AllocationCount();
            case EHeapFreeCell:
            case EFreeMask:
                {
                TInt err = CheckValid(ECommittedFreeSpace);
                if (err) return err;
                return iInfo->iHeapFreeCellCount;
                }
            default:
                return KErrNotSupported;
            }
        }
    else if (iAllocatorType == EUrelHybridHeap || iAllocatorType == EUdebHybridHeap)
        {
        TInt err = CheckValid(EHybridStats);
        if (err) return err;

        switch (aType)
            {
            case EHeapAllocation:
            case EHeapFreeCell:
                return KErrNotSupported;
            case EDlaAllocation:
                return iInfo->iDlaAllocsCount;
            case EPageAllocation:
                return iInfo->iPageAllocsCount;
            case ESlabAllocation:
                return iInfo->iSlabAllocsCount;
            case EDlaFreeCell:
                return iInfo->iDlaFreeCount;
            case ESlabFreeCell:
                return iInfo->iSlabFreeCellCount;
            case ESlabFreeSlab:
                return iInfo->iSlabFreeSlabCount;
            case EFreeMask:
                // This isn't a hugely meaningful value, but if that's what they asked for...
                return iInfo->iDlaFreeCount + iInfo->iSlabFreeCellCount + iInfo->iSlabFreeSlabCount;
            default:
                return KErrNotSupported;
            }
        }
    else
        {
        return KErrNotSupported;
        }
    }

HUEXPORT_C TBool LtkUtils::RAllocatorHelper::AllocatorIsUdeb() const
    {
    return iAllocatorType == EUdebOldRHeap || iAllocatorType == EUdebHybridHeap;
    }


HUEXPORT_C const TDesC& LtkUtils::RAllocatorHelper::Description() const
    {
    _LIT(KRHeap, "RHeap");
    _LIT(KRHybridHeap, "RHybridHeap");
    _LIT(KUnknown, "Unknown");
    switch (iAllocatorType)
        {
        case EUrelOldRHeap:
        case EUdebOldRHeap:
            return KRHeap;
        case EUrelHybridHeap:
        case EUdebHybridHeap:
            return KRHybridHeap;
        case EAllocator:
        case EUnknown:
        default:
            return KUnknown;
        }
    }

#ifdef __KERNEL_MODE__

DChunk* LtkUtils::RAllocatorHelper::OpenUnderlyingChunk()
    {
    // Enter and leave in CS and with no locks held. On exit the returned DChunk has been Open()ed.
    TInt err = iChunk->Open();
    if (err) return NULL;
    return iChunk;
    }

DChunk* LtkUtils::RKernelSideAllocatorHelper::OpenUnderlyingChunk()
    {
    if (iAllocatorType != EUrelOldRHeap && iAllocatorType != EUdebOldRHeap && iAllocatorType != EUrelHybridHeap && iAllocatorType != EUdebHybridHeap) return NULL;
    // Note RKernelSideAllocatorHelper doesn't use or access RAllocatorHelper::iChunk, because we figure out the chunk handle in a different way.
    // It is for this reason that iChunk is private, to remove temptation

    // Enter and leave in CS and with no locks held. On exit the returned DChunk has been Open()ed.
    TUint32 chunkHandle = 0;
    TInt err = ReadData(iAllocatorAddress + _FOFF(RHackHeap, iChunkHandle), &chunkHandle, sizeof(TUint32));
    if (err) return NULL;

    NKern::LockSystem();
    DChunk* result = (DChunk*)Kern::ObjectFromHandle(iThread, chunkHandle, EChunk);
    if (result && result->Open() != KErrNone)
        {
        result = NULL;
        }
    NKern::UnlockSystem();
    return result;
    }

LtkUtils::RAllocatorHelper::TType LtkUtils::RAllocatorHelper::GetType() const
    {
    switch (iAllocatorType)
        {
        case EUrelOldRHeap:
        case EUdebOldRHeap:
            return ETypeRHeap;
        case EUrelHybridHeap:
        case EUdebHybridHeap:
            return ETypeRHybridHeap;
        case EAllocator:
        case EUnknown:
        default:
            return ETypeUnknown;
        }
    }

#else

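// Determines whether euser.dll is the udeb (debug) build by creating a
// throwaway heap and checking whether __DbgSetAllocFail() actually causes the
// next allocation to fail; in urel builds it has no effect.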
TInt LtkUtils::RAllocatorHelper::EuserIsUdeb()
    {
    TAny* buf = User::Alloc(4096);
    if (!buf) return KErrNoMemory;
    RAllocator* dummyHeap = UserHeap::FixedHeap(buf, 4096, 4, ETrue);
    if (!dummyHeap) return KErrNoMemory; // Don't think this can happen

    dummyHeap->__DbgSetAllocFail(RAllocator::EFailNext, 1);
    TAny* ptr = dummyHeap->Alloc(4);
    // Because we specified singleThreaded=ETrue we can allow dummyHeap to just go out of scope here
    User::Free(buf);

    if (ptr)
        {
        // Clearly the __DbgSetAllocFail had no effect so we must be urel
        // We don't need to free ptr because it came from the dummy heap
        return EFalse;
        }
    else
        {
        return ETrue;
        }
    }

#ifndef STANDALONE_ALLOCHELPER

#include <fshell/ltkutils.h>
HUEXPORT_C void LtkUtils::MakeHeapCellInvisible(TAny* aCell)
    {
    RAllocatorHelper helper;
    TInt err = helper.Open(&User::Allocator());
    if (err == KErrNone)
        {
        helper.SetCellNestingLevel(aCell, -1);
        helper.Close();
        }
    }
#endif // STANDALONE_ALLOCHELPER

#endif