// heaputils.cpp
//
// Copyright (c) 2010 Accenture. All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Accenture - Initial contribution
//
#ifdef TEST_HYBRIDHEAP_ASSERTS
#define private public
#include <e32def.h>
#include "slab.h"
#include "page_alloc.h"
#include "heap_hybrid.h"
#endif

#include "heaputils.h"

#ifdef __KERNEL_MODE__

#include <kern_priv.h>
#define MEM Kern
__ASSERT_COMPILE(sizeof(LtkUtils::RKernelSideAllocatorHelper) == 10*4);
#define KERN_ENTER_CS() NKern::ThreadEnterCS()
#define KERN_LEAVE_CS() NKern::ThreadLeaveCS()
#define LOG(args...)
#define HUEXPORT_C
#else

#include <e32std.h>
#define MEM User
#define KERN_ENTER_CS()
#define KERN_LEAVE_CS()
//#include <e32debug.h>
//#define LOG(args...) RDebug::Printf(args)
#define LOG(args...)

#ifdef STANDALONE_ALLOCHELPER
#define HUEXPORT_C
#else
#define HUEXPORT_C EXPORT_C
#endif

#endif // __KERNEL_MODE__

using LtkUtils::RAllocatorHelper;
const TUint KPageSize = 4096;
__ASSERT_COMPILE(sizeof(RAllocatorHelper) == 9*4);

// RAllocatorHelper

HUEXPORT_C RAllocatorHelper::RAllocatorHelper()
	: iAllocatorAddress(0), iAllocatorType(EUnknown), iInfo(NULL), iValidInfo(0), iTempSlabBitmap(NULL), iPageCache(NULL), iPageCacheAddr(0)
#ifdef __KERNEL_MODE__
	, iChunk(NULL)
#endif
	{
	}

namespace LtkUtils
	{
	class THeapInfo
		{
	public:
		THeapInfo()
			{
			ClearStats();
			}

		void ClearStats()
			{
			memclr(this, sizeof(THeapInfo));
			}

		TInt iAllocatedSize; // number of bytes in allocated cells (excludes free cells, cell header overhead)
		TInt iCommittedSize; // amount of memory actually committed (includes cell header overhead, gaps smaller than an MMU page)
		TInt iAllocationCount; // number of allocations currently
		TInt iMaxCommittedSize; // or thereabouts
		TInt iMinCommittedSize;
		TInt iUnusedPages;
		TInt iCommittedFreeSpace;
		// Heap-only stats
		TInt iHeapFreeCellCount;
		// Hybrid-only stats
		TInt iDlaAllocsSize;
		TInt iDlaAllocsCount;
		TInt iDlaFreeSize;
		TInt iDlaFreeCount;
		TInt iSlabAllocsSize;
		TInt iSlabAllocsCount;
		TInt iPageAllocsSize;
		TInt iPageAllocsCount;
		TInt iSlabFreeCellSize;
		TInt iSlabFreeCellCount;
		TInt iSlabFreeSlabSize;
		TInt iSlabFreeSlabCount;
		};
	}
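// Illustrative usage (user-side; a sketch only, not part of the control flow here):
// open a helper on the current thread's allocator, read some of the stats that get
// accumulated in THeapInfo, then Close().
//
//   LtkUtils::RAllocatorHelper helper;
//   TInt err = helper.Open(&User::Allocator());
//   if (err == KErrNone)
//       {
//       TInt committed = helper.CommittedSize();
//       TInt allocated = helper.AllocatedSize();
//       TInt count = helper.AllocationCount();
//       helper.Close();
//       }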
       
   102 
       
const TInt KTempBitmapSize = 256; // KMaxSlabPayload / mincellsize, technically. Close enough.

#ifdef __KERNEL_MODE__

TInt RAllocatorHelper::OpenKernelHeap()
	{
	_LIT(KName, "SvHeap");
	NKern::ThreadEnterCS();
	DObjectCon* chunkContainer = Kern::Containers()[EChunk];
	chunkContainer->Wait();
	const TInt chunkCount = chunkContainer->Count();
	DChunk* foundChunk = NULL;
	for(TInt i=0; i<chunkCount; i++)
		{
		DChunk* chunk = (DChunk*)(*chunkContainer)[i];
		if (chunk->NameBuf() && chunk->NameBuf()->Find(KName) != KErrNotFound)
			{
			// Found it. No need to open it, we can be fairly confident the kernel heap isn't going to disappear from under us
			foundChunk = chunk;
			break;
			}
		}
	iChunk = foundChunk;
	chunkContainer->Signal();
#ifdef __WINS__
	TInt err = OpenChunkHeap((TLinAddr)foundChunk->Base(), 0); // It looks like DChunk::iBase/DChunk::iFixedBase should both be ok for the kernel chunk
#else
	// Copied from P::KernelInfo
	const TRomHeader& romHdr=Epoc::RomHeader();
	const TRomEntry* primaryEntry=(const TRomEntry*)Kern::SuperPage().iPrimaryEntry;
	const TRomImageHeader* primaryImageHeader=(const TRomImageHeader*)primaryEntry->iAddressLin;
	TLinAddr stack = romHdr.iKernDataAddress + Kern::RoundToPageSize(romHdr.iTotalSvDataSize);
	TLinAddr heap = stack + Kern::RoundToPageSize(primaryImageHeader->iStackSize);
	TInt err = OpenChunkHeap(heap, 0); // aChunkMaxSize is only used for trying the middle of the chunk for hybrid allocatorness, and the kernel heap doesn't use that (thankfully). So we can safely pass in zero.

#endif
	if (!err) err = FinishConstruction();
	NKern::ThreadLeaveCS();
	return err;
	}

#else

HUEXPORT_C TInt RAllocatorHelper::Open(RAllocator* aAllocator)
	{
	iAllocatorAddress = (TLinAddr)aAllocator;
	TInt udeb = EuserIsUdeb();
	if (udeb < 0) return udeb; // error

	TInt err = IdentifyAllocatorType(udeb);
	if (!err)
		{
		err = FinishConstruction(); // Allocate everything up front
		}
	if (!err)
		{
		// We always stealth our own allocations, again to avoid tripping up allocator checks
		SetCellNestingLevel(iInfo, -1);
		SetCellNestingLevel(iTempSlabBitmap, -1);
		SetCellNestingLevel(iPageCache, -1);
		}
	return err;
	}

#endif

TInt RAllocatorHelper::FinishConstruction()
	{
	TInt err = KErrNone;
	KERN_ENTER_CS();
	if (!iInfo)
		{
		iInfo = new THeapInfo;
		if (!iInfo) err = KErrNoMemory;
		}
	if (!err && !iTempSlabBitmap)
		{
		iTempSlabBitmap = (TUint8*)MEM::Alloc(KTempBitmapSize);
		if (!iTempSlabBitmap) err = KErrNoMemory;
		}
	if (!err && !iPageCache)
		{
		iPageCache = MEM::Alloc(KPageSize);
		if (!iPageCache) err = KErrNoMemory;
		}

	if (err)
		{
		delete iInfo;
		iInfo = NULL;
		MEM::Free(iTempSlabBitmap);
		iTempSlabBitmap = NULL;
		MEM::Free(iPageCache);
		iPageCache = NULL;
		}
	KERN_LEAVE_CS();
	return err;
	}

TInt RAllocatorHelper::ReadWord(TLinAddr aLocation, TUint32& aResult) const
	{
	// Check if we can satisfy the read from the cache
	if (aLocation >= iPageCacheAddr)
		{
		TUint offset = aLocation - iPageCacheAddr;
		if (offset < KPageSize)
			{
			aResult = ((TUint32*)iPageCache)[offset >> 2];
			return KErrNone;
			}
		}

	// If we reach here, not in page cache. Try and read in the new page
	if (iPageCache)
		{
		TLinAddr pageAddr = aLocation & ~(KPageSize-1);
		TInt err = ReadData(pageAddr, iPageCache, KPageSize);
		if (!err)
			{
			iPageCacheAddr = pageAddr;
			aResult = ((TUint32*)iPageCache)[(aLocation - iPageCacheAddr) >> 2];
			return KErrNone;
			}
		}

	// All else fails, try just reading it uncached
	return ReadData(aLocation, &aResult, sizeof(TUint32));
	}
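// Worked example of the cache check above (illustrative): with iPageCacheAddr ==
// 0x40008000, reading 0x40008F04 gives offset 0xF04, which is < KPageSize, so the
// word is served straight from iPageCache. Reading 0x40009000 misses (offset ==
// KPageSize), refills the cache from that page and leaves iPageCacheAddr ==
// 0x40009000. Setting iPageCacheAddr to 0 effectively invalidates the cache, since
// only addresses below KPageSize could then hit it.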
       
TInt RAllocatorHelper::ReadByte(TLinAddr aLocation, TUint8& aResult) const
	{
	// Like ReadWord but 8-bit

	// Check if we can satisfy the read from the cache
	if (aLocation >= iPageCacheAddr)
		{
		TUint offset = aLocation - iPageCacheAddr;
		if (offset < KPageSize)
			{
			aResult = ((TUint8*)iPageCache)[offset];
			return KErrNone;
			}
		}

	// If we reach here, not in page cache. Try and read in the new page
	if (iPageCache)
		{
		TLinAddr pageAddr = aLocation & ~(KPageSize-1);
		TInt err = ReadData(pageAddr, iPageCache, KPageSize);
		if (!err)
			{
			iPageCacheAddr = pageAddr;
			aResult = ((TUint8*)iPageCache)[(aLocation - iPageCacheAddr)];
			return KErrNone;
			}
		}

	// All else fails, try just reading it uncached
	return ReadData(aLocation, &aResult, sizeof(TUint8));
	}


TInt RAllocatorHelper::WriteWord(TLinAddr aLocation, TUint32 aWord)
	{
	// Invalidate the page cache if necessary
	if (aLocation >= iPageCacheAddr && aLocation - iPageCacheAddr < KPageSize)
		{
		iPageCacheAddr = 0;
		}

	return WriteData(aLocation, &aWord, sizeof(TUint32));
	}

TInt RAllocatorHelper::ReadData(TLinAddr aLocation, TAny* aResult, TInt aSize) const
	{
	// The RAllocatorHelper base class impl is for allocators in the same address space, so just copy it
	memcpy(aResult, (const TAny*)aLocation, aSize);
	return KErrNone;
	}

TInt RAllocatorHelper::WriteData(TLinAddr aLocation, const TAny* aData, TInt aSize)
	{
	memcpy((TAny*)aLocation, aData, aSize);
	return KErrNone;
	}

#ifdef __KERNEL_MODE__

LtkUtils::RKernelSideAllocatorHelper::RKernelSideAllocatorHelper()
	: iThread(NULL)
	{}

void LtkUtils::RKernelSideAllocatorHelper::Close()
	{
	NKern::ThreadEnterCS();
	if (iThread)
		{
		iThread->Close(NULL);
		}
	iThread = NULL;
	RAllocatorHelper::Close();
	NKern::ThreadLeaveCS();
	}

TInt LtkUtils::RKernelSideAllocatorHelper::ReadData(TLinAddr aLocation, TAny* aResult, TInt aSize) const
	{
	return Kern::ThreadRawRead(iThread, (const TAny*)aLocation, aResult, aSize);
	}

TInt LtkUtils::RKernelSideAllocatorHelper::WriteData(TLinAddr aLocation, const TAny* aData, TInt aSize)
	{
	return Kern::ThreadRawWrite(iThread, (TAny*)aLocation, aData, aSize);
	}

TInt LtkUtils::RKernelSideAllocatorHelper::TryLock()
	{
	return KErrNotSupported;
	}

void LtkUtils::RKernelSideAllocatorHelper::TryUnlock()
	{
	// Not supported
	}

TInt LtkUtils::RKernelSideAllocatorHelper::OpenUserHeap(TUint aThreadId, TLinAddr aAllocatorAddress, TBool aEuserIsUdeb)
	{
	NKern::ThreadEnterCS();
	DObjectCon* threads = Kern::Containers()[EThread];
	threads->Wait();
	iThread = Kern::ThreadFromId(aThreadId);
	if (iThread && iThread->Open() != KErrNone)
		{
		// Failed to open
		iThread = NULL;
		}
	threads->Signal();
	NKern::ThreadLeaveCS();
	if (!iThread) return KErrNotFound;
	iAllocatorAddress = aAllocatorAddress;
	TInt err = IdentifyAllocatorType(aEuserIsUdeb);
	if (err) Close();
	return err;
	}

#endif // __KERNEL_MODE__
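// Illustrative kernel-side flow (a rough sketch; 'threadId', 'allocatorAddr' and
// 'euserIsUdeb' are caller-supplied values, typically obtained from the device
// driver's client):
//
//   LtkUtils::RKernelSideAllocatorHelper helper;
//   TInt err = helper.OpenUserHeap(threadId, allocatorAddr, euserIsUdeb);
//   if (err == KErrNone)
//       {
//       // Reads now go through Kern::ThreadRawRead() against the target thread
//       TInt committed = helper.CommittedSize();
//       helper.Close();
//       }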
       
   348 
       
TInt RAllocatorHelper::OpenChunkHeap(TLinAddr aChunkBase, TInt aChunkMaxSize)
	{
	iAllocatorAddress = aChunkBase;
#ifdef __KERNEL_MODE__
	// Must be in CS
	// Assumes that this only ever gets called for the kernel heap. Anything else should go through RKernelSideAllocatorHelper::OpenUserHeap.
	TInt udeb = EFalse; // We can't figure this out until after we've got the heap
	TBool isTheKernelHeap = ETrue;
#else
	// Assumes the chunk isn't the kernel heap. It's not a good idea to try messing with the kernel heap from user side...
	TInt udeb = EuserIsUdeb();
	if (udeb < 0) return udeb; // error
	TBool isTheKernelHeap = EFalse;
#endif

	TInt err = IdentifyAllocatorType(udeb, isTheKernelHeap);
	if (err == KErrNone && iAllocatorType == EAllocator)
		{
		// We've no reason to assume it's an allocator, because we don't know that iAllocatorAddress actually points to an RAllocator
		err = KErrNotFound;
		}
	if (err && aChunkMaxSize > 0)
		{
		TInt oldErr = err;
		TAllocatorType oldType = iAllocatorType;
		// Try the middle of the chunk, in case it's an RHybridHeap
		iAllocatorAddress += aChunkMaxSize / 2;
		err = IdentifyAllocatorType(udeb, isTheKernelHeap);
		if (err || iAllocatorType == EAllocator)
			{
			// No better than before
			iAllocatorAddress = aChunkBase;
			iAllocatorType = oldType;
			err = oldErr;
			}
		}
#ifdef __KERNEL_MODE__
	if (err == KErrNone)
		{
		// Now we know the allocator, we can figure out the udeb-ness
		RAllocator* kernelAllocator = reinterpret_cast<RAllocator*>(iAllocatorAddress);
		kernelAllocator->DebugFunction(RAllocator::ESetFail, (TAny*)9999, (TAny*)0); // Use an invalid fail reason - this should have no effect on the operation of the heap
		TInt failReason = kernelAllocator->DebugFunction(7, NULL, NULL); // 7 is RAllocator::TAllocDebugOp::EGetFail
		if (failReason == 9999)
			{
			// udeb new hybrid heap
			udeb = ETrue;
			}
		else if (failReason == KErrNotSupported)
			{
			// Old heap - fall back to a slightly nasty non-thread-safe method
			kernelAllocator->DebugFunction(RAllocator::ESetFail, (TAny*)RAllocator::EFailNext, (TAny*)1);
			TAny* res = Kern::Alloc(4);
			if (!res) udeb = ETrue;
			Kern::Free(res);
			}
		else
			{
			// it's new urel
			}

		// Put everything back
		kernelAllocator->DebugFunction(RAllocator::ESetFail, (TAny*)RAllocator::ENone, (TAny*)0);
		// And update the type now we know the udeb-ness for certain
		err = IdentifyAllocatorType(udeb, isTheKernelHeap);
		}
#endif
	return err;
	}
       
// The guts of RAllocatorHelper

enum TWhatToGet
	{
	ECommitted = 1,
	EAllocated = 2,
	ECount = 4,
	EMaxSize = 8,
	EUnusedPages = 16,
	ECommittedFreeSpace = 32,
	EMinSize = 64,
	EHybridStats = 128,
	};

class RHackAllocator : public RAllocator
	{
public:
	using RAllocator::iHandles;
	using RAllocator::iTotalAllocSize;
	using RAllocator::iCellCount;
	};

class RHackHeap : public RHeap
	{
public:
	// Careful, only allowed to use things that are still in the new RHeap, and are still in the same place
	using RHeap::iMaxLength;
	using RHeap::iChunkHandle;
	using RHeap::iLock;
	using RHeap::iBase;
	using RHeap::iAlign;
	using RHeap::iTop;
	};

const TInt KChunkSizeOffset = 30*4;
const TInt KPageMapOffset = 141*4;
//const TInt KDlOnlyOffset = 33*4;
const TInt KMallocStateOffset = 34*4;
const TInt KMallocStateTopSizeOffset = 3*4;
const TInt KMallocStateTopOffset = 5*4;
const TInt KMallocStateSegOffset = 105*4;
const TInt KUserHybridHeapSize = 186*4;
const TInt KSparePageOffset = 167*4;
const TInt KPartialPageOffset = 165*4;
const TInt KFullSlabOffset = 166*4;
const TInt KSlabAllocOffset = 172*4;
const TInt KSlabParentOffset = 1*4;
const TInt KSlabChild1Offset = 2*4;
const TInt KSlabChild2Offset = 3*4;
const TInt KSlabPayloadOffset = 4*4;
const TInt KSlabsetSize = 4;
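// These constants are hand-derived word offsets into the urel user-side RHybridHeap,
// malloc_state and slab layouts. They are only checked against the real headers when
// TEST_HYBRIDHEAP_ASSERTS is defined (see the __ASSERT_COMPILE block below), so any
// change to the allocator layout needs a rebuild with that macro to revalidate them.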
       
   471 
       
#ifdef TEST_HYBRIDHEAP_ASSERTS
__ASSERT_COMPILE(_FOFF(RHybridHeap, iChunkSize) == KChunkSizeOffset);
__ASSERT_COMPILE(_FOFF(RHybridHeap, iPageMap) == KPageMapOffset);
__ASSERT_COMPILE(_FOFF(RHybridHeap, iGlobalMallocState) == KMallocStateOffset);
__ASSERT_COMPILE(sizeof(malloc_state) == 107*4);
__ASSERT_COMPILE(_FOFF(malloc_state, iTopSize) == KMallocStateTopSizeOffset);
__ASSERT_COMPILE(_FOFF(malloc_state, iTop) == KMallocStateTopOffset);
__ASSERT_COMPILE(_FOFF(malloc_state, iSeg) == KMallocStateSegOffset);
__ASSERT_COMPILE(sizeof(RHybridHeap) == KUserHybridHeapSize);
__ASSERT_COMPILE(_FOFF(RHybridHeap, iSparePage) == KSparePageOffset);
__ASSERT_COMPILE(_FOFF(RHybridHeap, iPartialPage) == KPartialPageOffset);
__ASSERT_COMPILE(_FOFF(RHybridHeap, iSlabAlloc) == KSlabAllocOffset);
__ASSERT_COMPILE(_FOFF(slab, iParent) == KSlabParentOffset);
__ASSERT_COMPILE(_FOFF(slab, iChild1) == KSlabChild1Offset);
__ASSERT_COMPILE(_FOFF(slab, iChild2) == KSlabChild2Offset);
__ASSERT_COMPILE(_FOFF(slab, iPayload) == KSlabPayloadOffset);
__ASSERT_COMPILE(sizeof(slabset) == KSlabsetSize);
#endif

TInt RAllocatorHelper::TryLock()
	{
#ifdef __KERNEL_MODE__
	NKern::ThreadEnterCS();
	DMutex* m = *(DMutex**)(iAllocatorAddress + _FOFF(RHackHeap, iLock));
	if (m) Kern::MutexWait(*m);
	return KErrNone;
#else
	if (iAllocatorType != EUnknown && iAllocatorType != EAllocator)
		{
		RFastLock& lock = *reinterpret_cast<RFastLock*>(iAllocatorAddress + _FOFF(RHackHeap, iLock));
		lock.Wait();
		return KErrNone;
		}
	return KErrNotSupported;
#endif
	}

void RAllocatorHelper::TryUnlock()
	{
#ifdef __KERNEL_MODE__
	DMutex* m = *(DMutex**)(iAllocatorAddress + _FOFF(RHackHeap, iLock));
	if (m) Kern::MutexSignal(*m);
	NKern::ThreadLeaveCS();
#else
	if (iAllocatorType != EUnknown && iAllocatorType != EAllocator)
		{
		RFastLock& lock = *reinterpret_cast<RFastLock*>(iAllocatorAddress + _FOFF(RHackHeap, iLock));
		lock.Signal();
		}
#endif
	}

HUEXPORT_C void RAllocatorHelper::Close()
	{
	KERN_ENTER_CS();
	iAllocatorType = EUnknown;
	iAllocatorAddress = 0;
	delete iInfo;
	iInfo = NULL;
	iValidInfo = 0;
	MEM::Free(iTempSlabBitmap);
	iTempSlabBitmap = NULL;
	MEM::Free(iPageCache);
	iPageCache = NULL;
	iPageCacheAddr = 0;
	KERN_LEAVE_CS();
	}

TInt RAllocatorHelper::IdentifyAllocatorType(TBool aAllocatorIsUdeb, TBool aIsTheKernelHeap)
	{
	iAllocatorType = EUnknown;

	TUint32 handlesPtr = 0;
	TInt err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iHandles), handlesPtr);

	if (err) return err;
	if (aIsTheKernelHeap ||
	    handlesPtr == iAllocatorAddress + _FOFF(RHackHeap, iChunkHandle) ||
	    handlesPtr == iAllocatorAddress + _FOFF(RHackHeap, iLock))
		{
		// It's an RHeap of some kind - I doubt any other RAllocator subclass will use iHandles in this way
		TUint32 base = 0;
		err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), base);
		if (err) return err;
		TInt objsize = (TInt)base - (TInt)iAllocatorAddress;
		if (objsize <= 32*4)
			{
			// Old RHeap
			iAllocatorType = aAllocatorIsUdeb ? EUdebOldRHeap : EUrelOldRHeap;
			}
		else
			{
			// new hybrid heap - bigger than the old one. Likewise figure out if udeb or urel.
			iAllocatorType = aAllocatorIsUdeb ? EUdebHybridHeap : EUrelHybridHeap;
			}
		}
	else
		{
		iAllocatorType = EAllocator;
		}
	return KErrNone;
	}

HUEXPORT_C TInt RAllocatorHelper::SetCellNestingLevel(TAny* aCell, TInt aNestingLevel)
	{
	TInt err = KErrNone;

	switch (iAllocatorType)
		{
		case EUdebOldRHeap:
		case EUdebHybridHeap:
			// By this reckoning, they're in the same place amazingly
			{
			TLinAddr nestingAddr = (TLinAddr)aCell - 8;
			err = WriteWord(nestingAddr, aNestingLevel);
			break;
			}
		default:
			break;
		}
	return err;
	}

HUEXPORT_C TInt RAllocatorHelper::GetCellNestingLevel(TAny* aCell, TInt& aNestingLevel)
	{
	switch (iAllocatorType)
		{
		case EUdebOldRHeap:
		case EUdebHybridHeap:
			// By this reckoning, they're in the same place amazingly
			{
			TLinAddr nestingAddr = (TLinAddr)aCell - 8;
			return ReadWord(nestingAddr, (TUint32&)aNestingLevel);
			}
		default:
			return KErrNotSupported;
		}
	}

TInt RAllocatorHelper::RefreshDetails(TUint aMask)
	{
	TInt err = FinishConstruction();
	if (err) return err;

	// Invalidate the page cache
	iPageCacheAddr = 0;

	TryLock();
	err = DoRefreshDetails(aMask);
	TryUnlock();
	return err;
	}
       
   624 
       
const TInt KHeapWalkStatsForOldHeap = (EUnusedPages|ECommittedFreeSpace);
const TInt KHeapWalkStatsForNewHeap = (EAllocated|ECount|EUnusedPages|ECommittedFreeSpace|EHybridStats);

TInt RAllocatorHelper::DoRefreshDetails(TUint aMask)
	{
	TInt err = KErrNotSupported;
	switch (iAllocatorType)
		{
		case EUrelOldRHeap:
		case EUdebOldRHeap:
			{
			if (aMask & ECommitted)
				{
				// The old RHeap::Size() used to use iTop - iBase, which was effectively chunkSize - sizeof(RHeap)
				// I think that for CommittedSize we should include the size of the heap object, just as it includes
				// the size of heap cell metadata and overhead. Plus it makes sure the committed size is a multiple of the page size
				TUint32 top = 0;
				//TUint32 base = 0;
				//err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), base);
				//if (err) return err;
				err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iTop), top);
				if (err) return err;

				//iInfo->iCommittedSize = top - base;
				iInfo->iCommittedSize = top - iAllocatorAddress;
				iValidInfo |= ECommitted;
				}
			if (aMask & EAllocated)
				{
				TUint32 allocSize = 0;
				err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iTotalAllocSize), allocSize);
				if (err) return err;
				iInfo->iAllocatedSize = allocSize;
				iValidInfo |= EAllocated;
				}
			if (aMask & ECount)
				{
				TUint32 count = 0;
				err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iCellCount), count);
				if (err) return err;
				iInfo->iAllocationCount = count;
				iValidInfo |= ECount;
				}
			if (aMask & EMaxSize)
				{
				TUint32 maxlen = 0;
				err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iMaxLength), maxlen);
				if (err) return err;
				iInfo->iMaxCommittedSize = maxlen;
				iValidInfo |= EMaxSize;
				}
			if (aMask & EMinSize)
				{
				TUint32 minlen = 0;
				err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iMaxLength) - 4, minlen); // This isn't a typo! iMinLength is 4 bytes before iMaxLength, on the old heap ONLY
				if (err) return err;
				iInfo->iMinCommittedSize = minlen;
				iValidInfo |= EMinSize;
				}
			if (aMask & KHeapWalkStatsForOldHeap)
				{
				// Need a heap walk
				iInfo->ClearStats();
				iValidInfo = 0;
				err = DoWalk(&WalkForStats, NULL);
				if (err == KErrNone) iValidInfo |= KHeapWalkStatsForOldHeap;
				}
			return err;
			}
		case EUrelHybridHeap:
		case EUdebHybridHeap:
			{
			TBool needWalk = EFalse;
			if (aMask & ECommitted)
				{
				// RAllocator::Size uses iChunkSize - sizeof(RHybridHeap);
				// We can't do exactly the same, because we can't calculate sizeof(RHybridHeap), only ROUND_UP(sizeof(RHybridHeap), iAlign)
				// And in fact we don't bother and just use iChunkSize
				TUint32 chunkSize = 0;
				err = ReadWord(iAllocatorAddress + KChunkSizeOffset, chunkSize);
				if (err) return err;
				//TUint32 baseAddr = 0;
				//err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), baseAddr);
				//if (err) return err;
				iInfo->iCommittedSize = chunkSize; // - (baseAddr - iAllocatorAddress);
				iValidInfo |= ECommitted;
				}
			if (aMask & (EAllocated|ECount))
				{
				if (iAllocatorType == EUdebHybridHeap)
					{
					// Easy, just get them from the counters
					TUint32 totalAlloc = 0;
					err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iTotalAllocSize), totalAlloc);
					if (err) return err;
					iInfo->iAllocatedSize = totalAlloc;
					iValidInfo |= EAllocated;

					TUint32 cellCount = 0;
					err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iCellCount), cellCount);
					if (err) return err;
					iInfo->iAllocationCount = cellCount;
					iValidInfo |= ECount;
					}
				else
					{
					// A heap walk is needed
					needWalk = ETrue;
					}
				}
			if (aMask & EMaxSize)
				{
				TUint32 maxlen = 0;
				err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iMaxLength), maxlen);
				if (err) return err;
				iInfo->iMaxCommittedSize = maxlen;
				iValidInfo |= EMaxSize;
				}
			if (aMask & EMinSize)
				{
				TUint32 minlen = 0;
				err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iAlign) + 4*4, minlen); // iMinLength is in a different place to the old RHeap
				if (err) return err;
				iInfo->iMinCommittedSize = minlen;
				iValidInfo |= EMinSize;
				}
			if (aMask & (EUnusedPages|ECommittedFreeSpace|EHybridStats))
				{
				// EAllocated and ECount have already been taken care of above
				needWalk = ETrue;
				}

			if (needWalk)
				{
				iInfo->ClearStats();
				iValidInfo = 0;
				err = DoWalk(&WalkForStats, NULL);
				if (err == KErrNone) iValidInfo |= KHeapWalkStatsForNewHeap;
				}
			return err;
			}
		default:
			return KErrNotSupported;
		}
	}
       
   770 
       
TInt RAllocatorHelper::CheckValid(TUint aMask)
	{
	if ((iValidInfo & aMask) == aMask)
		{
		return KErrNone;
		}
	else
		{
		return RefreshDetails(aMask);
		}
	}

HUEXPORT_C TInt RAllocatorHelper::CommittedSize()
	{
	TInt err = CheckValid(ECommitted);
	if (err) return err;
	return iInfo->iCommittedSize;
	}

HUEXPORT_C TInt RAllocatorHelper::AllocatedSize()
	{
	TInt err = CheckValid(EAllocated);
	if (err) return err;
	return iInfo->iAllocatedSize;
	}

HUEXPORT_C TInt RAllocatorHelper::AllocationCount()
	{
	TInt err = CheckValid(ECount);
	if (err) return err;
	return iInfo->iAllocationCount;
	}

HUEXPORT_C TInt RAllocatorHelper::RefreshDetails()
	{
	return RefreshDetails(iValidInfo);
	}

HUEXPORT_C TInt RAllocatorHelper::MaxCommittedSize()
	{
	TInt err = CheckValid(EMaxSize);
	if (err) return err;
	return iInfo->iMaxCommittedSize;
	}

HUEXPORT_C TInt RAllocatorHelper::MinCommittedSize()
	{
	TInt err = CheckValid(EMinSize);
	if (err) return err;
	return iInfo->iMinCommittedSize;
	}

HUEXPORT_C TInt RAllocatorHelper::AllocCountForCell(TAny* aCell) const
	{
	TUint32 allocCount = 0;
	switch (iAllocatorType)
		{
		case EUdebOldRHeap:
		case EUdebHybridHeap: // Both are in the same place, amazingly
			{
			TLinAddr allocCountAddr = (TLinAddr)aCell - 4;
			TInt err = ReadWord(allocCountAddr, allocCount);
			if (err) return err;
			return (TInt)allocCount;
			}
		default:
			return KErrNotSupported;
		}
	}

struct SContext3
	{
	RAllocatorHelper::TWalkFunc3 iOrigWalkFn;
	TAny* iOrigContext;
	};

TBool RAllocatorHelper::DispatchClientWalkCallback(RAllocatorHelper& aHelper, TAny* aContext, RAllocatorHelper::TExtendedCellType aCellType, TLinAddr aCellPtr, TInt aCellLength)
	{
	WalkForStats(aHelper, NULL, aCellType, aCellPtr, aCellLength);
	SContext3* context = static_cast<SContext3*>(aContext);
	return (*context->iOrigWalkFn)(aHelper, context->iOrigContext, aCellType, aCellPtr, aCellLength);
	}

HUEXPORT_C TInt RAllocatorHelper::Walk(TWalkFunc3 aCallbackFn, TAny* aContext)
	{
	// Might as well take the opportunity of updating our stats at the same time as walking the heap for the client
	SContext3 context = { aCallbackFn, aContext };

	TInt err = FinishConstruction(); // In case this hasn't been done yet
	if (err) return err;

	TryLock();
	err = DoWalk(&DispatchClientWalkCallback, &context);
	TryUnlock();
	return err;
	}
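// Illustrative walk callback (a sketch only; 'helper' stands for an already-opened
// RAllocatorHelper). The callback signature matches DispatchClientWalkCallback
// above; returning EFalse stops the walk early.
//
//   TBool CountAllocs(LtkUtils::RAllocatorHelper& /*aHelper*/, TAny* aContext,
//                     LtkUtils::RAllocatorHelper::TExtendedCellType aType,
//                     TLinAddr /*aCellPtr*/, TInt /*aCellLength*/)
//       {
//       if (aType & LtkUtils::RAllocatorHelper::EAllocationMask)
//           {
//           ++*static_cast<TInt*>(aContext);
//           }
//       return ETrue; // keep walking
//       }
//
//   TInt count = 0;
//   TInt err = helper.Walk(&CountAllocs, &count);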
       
   867 
       
TInt RAllocatorHelper::DoWalk(TWalkFunc3 aCallbackFn, TAny* aContext)
	{
	TInt err = KErrNotSupported;
	switch (iAllocatorType)
		{
		case EUdebOldRHeap:
		case EUrelOldRHeap:
			err = OldSkoolWalk(aCallbackFn, aContext);
			break;
		case EUrelHybridHeap:
		case EUdebHybridHeap:
			err = NewHotnessWalk(aCallbackFn, aContext);
			break;
		default:
			err = KErrNotSupported;
			break;
		}
	return err;
	}

struct SContext
	{
	RAllocatorHelper::TWalkFunc iOrigWalkFn;
	TAny* iOrigContext;
	};

struct SContext2
	{
	RAllocatorHelper::TWalkFunc2 iOrigWalkFn;
	TAny* iOrigContext;
	};

#define New2Old(aNew) (((aNew)&RAllocatorHelper::EAllocationMask) ? RAllocatorHelper::EAllocation : ((aNew)&RAllocatorHelper::EFreeMask) ? RAllocatorHelper::EFreeSpace : RAllocatorHelper::EBadness)
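// e.g. New2Old(EDlaAllocation) collapses to EAllocation and New2Old(ESlabFreeCell)
// to EFreeSpace; any type carrying neither mask maps to EBadness.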
       
   901 
       
TBool DispatchOldTWalkFuncCallback(RAllocatorHelper& /*aHelper*/, TAny* aContext, RAllocatorHelper::TExtendedCellType aCellType, TLinAddr aCellPtr, TInt aCellLength)
	{
	SContext* context = static_cast<SContext*>(aContext);
	return (*context->iOrigWalkFn)(context->iOrigContext, New2Old(aCellType), aCellPtr, aCellLength);
	}

TBool DispatchOldTWalk2FuncCallback(RAllocatorHelper& aHelper, TAny* aContext, RAllocatorHelper::TExtendedCellType aCellType, TLinAddr aCellPtr, TInt aCellLength)
	{
	SContext2* context = static_cast<SContext2*>(aContext);
	return (*context->iOrigWalkFn)(aHelper, context->iOrigContext, New2Old(aCellType), aCellPtr, aCellLength);
	}

HUEXPORT_C TInt RAllocatorHelper::Walk(TWalkFunc aCallbackFn, TAny* aContext)
	{
	// For backwards compatibility, insert a shim callback to map between the old and new callback signatures
	SContext context = { aCallbackFn, aContext };
	return Walk(&DispatchOldTWalkFuncCallback, &context);
	}

HUEXPORT_C TInt RAllocatorHelper::Walk(TWalkFunc2 aCallbackFn, TAny* aContext)
	{
	SContext2 context = { aCallbackFn, aContext };
	return Walk(&DispatchOldTWalk2FuncCallback, &context);
	}
       
   926 
       
   927 
       
TInt RAllocatorHelper::OldSkoolWalk(TWalkFunc3 aCallbackFn, TAny* aContext)
	{
	TLinAddr pC = 0;
	TInt err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), pC); // pC = iBase; // allocated cells
	if (err) return err;
	TLinAddr pF = iAllocatorAddress + _FOFF(RHackHeap, iAlign) + 3*4; // pF = &iFree; // free cells

	TLinAddr top = 0;
	err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iTop), top);
	if (err) return err;
	const TInt KAllocatedCellHeaderSize = iAllocatorType == EUdebOldRHeap ? 12 : 4;
	TInt minCell = 0;
	err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iAlign) + 4, (TUint32&)minCell);
	if (err) return err;
	TInt align = 0;
	err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iAlign), (TUint32&)align);
	if (err) return err;

	FOREVER
		{
		err = ReadWord(pF+4, pF); // pF = pF->next; // next free cell
		if (err) return err;
		TLinAddr pFnext = 0;
		if (pF) err = ReadWord(pF + 4, pFnext);
		if (err) return err;

		if (!pF)
			{
			pF = top; // to make size checking work
			}
		else if (pF>=top || (pFnext && pFnext<=pF) )
			{
			// free cell pointer off the end or going backwards
			//Unlock();
			(*aCallbackFn)(*this, aContext, EHeapBadFreeCellAddress, pF, 0);
			return KErrCorrupt;
			}
		else
			{
			TInt l; // = pF->len
			err = ReadWord(pF, (TUint32&)l);
			if (err) return err;
			if (l<minCell || (l & (align-1)))
				{
				// free cell length invalid
				//Unlock();
				(*aCallbackFn)(*this, aContext, EHeapBadFreeCellSize, pF, l);
				return KErrCorrupt;
				}
			}

		while (pC!=pF)				// walk allocated cells up to next free cell
			{
			TInt l; // pC->len;
			err = ReadWord(pC, (TUint32&)l);
			if (err) return err;
			if (l<minCell || (l & (align-1)))
				{
				// allocated cell length invalid
				//Unlock();
				(*aCallbackFn)(*this, aContext, EHeapBadAllocatedCellSize, pC, l);
				return KErrCorrupt;
				}
			TBool shouldContinue = (*aCallbackFn)(*this, aContext, EHeapAllocation, pC + KAllocatedCellHeaderSize, l - KAllocatedCellHeaderSize);
			if (!shouldContinue) return KErrNone;

			//SCell* pN = __NEXT_CELL(pC);
			TLinAddr pN = pC + l;
			if (pN > pF)
				{
				// cell overlaps next free cell
				//Unlock();
				(*aCallbackFn)(*this, aContext, EHeapBadAllocatedCellAddress, pC, l);
				return KErrCorrupt;
				}
			pC = pN;
			}
		if (pF == top)
			break;		// reached end of heap
		TInt pFlen = 0;
		err = ReadWord(pF, (TUint32&)pFlen);
		if (err) return err;
		pC = pF + pFlen; // pC = __NEXT_CELL(pF);	// step to next allocated cell
		TBool shouldContinue = (*aCallbackFn)(*this, aContext, EHeapFreeCell, pF, pFlen);
		if (!shouldContinue) return KErrNone;
		}
	return KErrNone;
	}
       
  1016 
       
HUEXPORT_C TInt RAllocatorHelper::CountUnusedPages()
	{
	TInt err = CheckValid(EUnusedPages);
	if (err) return err;
	return iInfo->iUnusedPages;
	}

HUEXPORT_C TInt RAllocatorHelper::CommittedFreeSpace()
	{
	TInt err = CheckValid(ECommittedFreeSpace);
	if (err) return err;
	return iInfo->iCommittedFreeSpace;
	}

#define ROUND_DOWN(val, pow2) ((val) & ~((pow2)-1))
#define ROUND_UP(val, pow2) ROUND_DOWN((val) + (pow2) - 1, (pow2))
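// e.g. ROUND_DOWN(0x1234, 0x1000) == 0x1000 and ROUND_UP(0x1234, 0x1000) == 0x2000.
// Both assume pow2 really is a power of two.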
       
  1033 
       
HUEXPORT_C TLinAddr RAllocatorHelper::AllocatorAddress() const
	{
	return iAllocatorAddress;
	}

TBool RAllocatorHelper::WalkForStats(RAllocatorHelper& aSelf, TAny* /*aContext*/, TExtendedCellType aType, TLinAddr aCellPtr, TInt aCellLength)
	{
	//ASSERT(aCellLength >= 0);
	THeapInfo& info = *aSelf.iInfo;

	TInt pagesSpanned = 0; // The number of pages that fit entirely inside the payload of this cell
	if ((TUint)aCellLength > KPageSize)
		{
		TLinAddr nextPageAlignedAddr = ROUND_UP(aCellPtr, KPageSize);
		pagesSpanned = ROUND_DOWN(aCellPtr + aCellLength - nextPageAlignedAddr, KPageSize) / KPageSize;
		}
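	// Worked example (illustrative): a cell at 0x8010 with length 0x2100 ends at
	// 0xA110. The first page boundary inside it is 0x9000, and only the page at
	// 0x9000-0x9FFF lies entirely within the cell, so pagesSpanned == 1.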
       
	if (aSelf.iAllocatorType == EUrelOldRHeap || aSelf.iAllocatorType == EUdebOldRHeap)
		{
		if (aType & EFreeMask)
			{
			info.iUnusedPages += pagesSpanned;
			info.iCommittedFreeSpace += aCellLength;
			info.iHeapFreeCellCount++;
			}
		}
	else
		{
		if (aType & EAllocationMask)
			{
			info.iAllocatedSize += aCellLength;
			info.iAllocationCount++;
			}
		else if (aType & EFreeMask)
			{
			// I *think* that DLA will decommit pages from inside free cells...
			TInt committedLen = aCellLength - (pagesSpanned * KPageSize);
			info.iCommittedFreeSpace += committedLen;
			}

		switch (aType)
			{
			case EDlaAllocation:
				info.iDlaAllocsSize += aCellLength;
				info.iDlaAllocsCount++;
				break;
			case EPageAllocation:
				info.iPageAllocsSize += aCellLength;
				info.iPageAllocsCount++;
				break;
			case ESlabAllocation:
				info.iSlabAllocsSize += aCellLength;
				info.iSlabAllocsCount++;
				break;
			case EDlaFreeCell:
				info.iDlaFreeSize += aCellLength;
				info.iDlaFreeCount++;
				break;
			case ESlabFreeCell:
				info.iSlabFreeCellSize += aCellLength;
				info.iSlabFreeCellCount++;
				break;
			case ESlabFreeSlab:
				info.iSlabFreeSlabSize += aCellLength;
				info.iSlabFreeSlabCount++;
				break;
			default:
				break;
			}
		}

	return ETrue;
	}
       
  1107 
       
#define PAGESHIFT 12

TUint RAllocatorHelper::PageMapOperatorBrackets(unsigned ix, TInt& err) const
	{
	//return 1U&(iBase[ix>>3] >> (ix&7));
	TUint32 basePtr = 0;
	err = ReadWord(iAllocatorAddress + KPageMapOffset, basePtr);
	if (err) return 0;

	TUint8 res = 0;
	err = ReadByte(basePtr + (ix >> 3), res);
	if (err) return 0;

	return 1U&(res >> (ix&7));
	}


TInt RAllocatorHelper::PageMapFind(TUint start, TUint bit, TInt& err)
	{
	TUint32 iNbits = 0;
	err = ReadWord(iAllocatorAddress + KPageMapOffset + 4, iNbits);
	if (err) return 0;

	if (start<iNbits) do
		{
		//if ((*this)[start]==bit)
		if (PageMapOperatorBrackets(start, err) == bit || err)
			return start;
		} while (++start<iNbits);
	return -1;
	}

TUint RAllocatorHelper::PagedDecode(TUint pos, TInt& err)
	{
	unsigned bits = PageMapBits(pos,2,err);
	if (err) return 0;
	bits >>= 1;
	if (bits == 0)
		return 1;
	bits = PageMapBits(pos+2,2,err);
	if (err) return 0;
	if ((bits & 1) == 0)
		return 2 + (bits>>1);
	else if ((bits>>1) == 0)
		{
		return PageMapBits(pos+4, 4,err);
		}
	else
		{
		return PageMapBits(pos+4, 18,err);
		}
	}
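// The encoding decoded above, for reference (bits are read LSB-first from the page
// map): each run starts at a set bit. If the next bit is 0, the run is a single
// page. Otherwise the following two bits select the form: first bit 0 means 2 or 3
// pages (the second bit picks which); 1 then 0 means a 4-bit page count follows;
// 1 then 1 means an 18-bit page count follows.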
       
  1160 
       
TUint RAllocatorHelper::PageMapBits(unsigned ix, unsigned len, TInt& err)
	{
	int l=len;
	unsigned val=0;
	unsigned bit=0;
	while (--l>=0)
		{
		//val |= (*this)[ix++]<<bit++;
		val |= PageMapOperatorBrackets(ix++, err) << bit++;
		if (err) return 0;
		}
	return val;
	}
       
  1174 
       
enum TSlabType { ESlabFullInfo, ESlabPartialInfo, ESlabEmptyInfo };

#ifndef TEST_HYBRIDHEAP_ASSERTS
#define MAXSLABSIZE		56
#define	SLABSHIFT		10
#define	SLABSIZE		(1 << SLABSHIFT)
const TInt KMaxSlabPayload = SLABSIZE - KSlabPayloadOffset;
#endif

TInt RAllocatorHelper::NewHotnessWalk(TWalkFunc3 aCallbackFn, TAny* aContext)
	{
	// RHybridHeap does paged, slab then DLA, so that's what we do too
	// Remember Kernel RHybridHeaps don't even have the page and slab members

	TUint32 basePtr;
	TInt err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), basePtr);
	if (err) return err;
	if (basePtr < iAllocatorAddress + KUserHybridHeapSize)
		{
		// Must be a kernel one - don't do page and slab
		}
	else
		{
		// Paged
		TUint32 membase = 0;
		err = ReadWord(iAllocatorAddress + KPageMapOffset + 8, membase);
		if (err) return err;

		TBool shouldContinue = ETrue;
		for (int ix = 0;(ix = PageMapFind(ix,1,err)) >= 0 && err == KErrNone;)
			{
			int npage = PagedDecode(ix, err);
			if (err) return err;
			// Introduce paged buffer to the walk function
			TLinAddr bfr = membase + (1 << (PAGESHIFT-1))*ix;
			int len = npage << PAGESHIFT;
			if ( (TUint)len > KPageSize )
				{ // Buffers of one page or less are slab pages mapped into the bitmap, so they're covered by the slab walk below rather than reported here
				if (iAllocatorType == EUdebHybridHeap)
					{
					bfr += 8;
					len -= 8;
					}
				shouldContinue = (*aCallbackFn)(*this, aContext, EPageAllocation, bfr, len);
				if (!shouldContinue) return KErrNone;
				}
			ix += (npage<<1);
			}
		if (err) return err;

		// Slab
		TUint32 sparePage = 0;
		err = ReadWord(iAllocatorAddress + KSparePageOffset, sparePage);
		if (err) return err;
		if (sparePage)
			{
			//Walk(wi, iSparePage, iPageSize, EGoodFreeCell, ESlabSpare); // Introduce Slab spare page to the walk function
			// This counts as 4 spare slabs
			for (TInt i = 0; i < 4; i++)
				{
				shouldContinue = (*aCallbackFn)(*this, aContext, ESlabFreeSlab, sparePage + SLABSIZE*i, SLABSIZE);
				if (!shouldContinue) return KErrNone;
				}
			}

		//TreeWalk(&iFullSlab, &SlabFullInfo, i, wi);
		err = TreeWalk(iAllocatorAddress + KFullSlabOffset, ESlabFullInfo, aCallbackFn, aContext, shouldContinue);
		if (err || !shouldContinue) return err;
		for (int ix = 0; ix < (MAXSLABSIZE>>2); ++ix)
			{
			TUint32 partialAddr = iAllocatorAddress + KSlabAllocOffset + ix*KSlabsetSize;
			//TreeWalk(&iSlabAlloc[ix].iPartial, &SlabPartialInfo, i, wi);
			err = TreeWalk(partialAddr, ESlabPartialInfo, aCallbackFn, aContext, shouldContinue);
			if (err || !shouldContinue) return err;
			}
		//TreeWalk(&iPartialPage, &SlabEmptyInfo, i, wi);
		err = TreeWalk(iAllocatorAddress + KPartialPageOffset, ESlabEmptyInfo, aCallbackFn, aContext, shouldContinue);
		if (err || !shouldContinue) return err;
		}

	// DLA
#define CHUNK_OVERHEAD (sizeof(TUint))
#define CHUNK_ALIGN_MASK (7)
#define CHUNK2MEM(p)        ((TLinAddr)(p) + 8)
#define MEM2CHUNK(mem)      ((TLinAddr)(mem) - 8)
/* chunk associated with aligned address A */
#define ALIGN_OFFSET(A)\
	((((TLinAddr)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
	((8 - ((TLinAddr)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
#define ALIGN_AS_CHUNK(A)   ((A) + ALIGN_OFFSET(CHUNK2MEM(A)))
#define CINUSE_BIT 2
#define INUSE_BITS 3

	TUint32 topSize = 0;
	err = ReadWord(iAllocatorAddress + KMallocStateOffset + KMallocStateTopSizeOffset, topSize);
	if (err) return err;

	TUint32 top = 0;
	err = ReadWord(iAllocatorAddress + KMallocStateOffset + KMallocStateTopOffset, top);
	if (err) return err;

	TInt max = ((topSize-1) & ~CHUNK_ALIGN_MASK) - CHUNK_OVERHEAD;
	if ( max < 0 )
		max = 0;

	TBool shouldContinue = (*aCallbackFn)(*this, aContext, EDlaFreeCell, top, max);
	if (!shouldContinue) return KErrNone;

	TUint32 mallocStateSegBase = 0;
	err = ReadWord(iAllocatorAddress + KMallocStateOffset + KMallocStateSegOffset, mallocStateSegBase);
	if (err) return err;

	for (TLinAddr q = ALIGN_AS_CHUNK(mallocStateSegBase); q != top; /*q = NEXT_CHUNK(q)*/)
		{
		TUint32 qhead = 0;
		err = ReadWord(q + 4, qhead);
		if (err) return err;
		//TInt sz = CHUNKSIZE(q);
		TInt sz = qhead & ~(INUSE_BITS);
		if (!(qhead & CINUSE_BIT))
			{
			//Walk(wi, CHUNK2MEM(q), sz, EGoodFreeCell, EDougLeaAllocator); // Introduce DL free buffer to the walk function
			shouldContinue = (*aCallbackFn)(*this, aContext, EDlaFreeCell, CHUNK2MEM(q), sz);
			if (!shouldContinue) return KErrNone;
			}
		else
			{
			//Walk(wi, CHUNK2MEM(q), (sz- CHUNK_OVERHEAD), EGoodAllocatedCell, EDougLeaAllocator); // Introduce DL allocated buffer to the walk function
			TLinAddr addr = CHUNK2MEM(q);
			TInt size = sz - CHUNK_OVERHEAD;
			if (iAllocatorType == EUdebHybridHeap)
				{
				size -= 8;
				addr += 8;
				}
			shouldContinue = (*aCallbackFn)(*this, aContext, EDlaAllocation, addr, size);
			if (!shouldContinue) return KErrNone;
			}
		// This is q = NEXT_CHUNK(q) expanded
		q = q + sz;
		}
	return KErrNone;
	}
       
  1317 
       
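       // For reference, a minimal sketch of a walker callback as invoked by the code above (the
       // real TWalkFunc3 typedef lives in heaputils.h; the callback name and the running total in
       // aContext here are made up for illustration):
       //
       //   TBool TotalAllocatedBytes(RAllocatorHelper& aHelper, TAny* aContext,
       //           RAllocatorHelper::TExtendedCellType aType, TLinAddr aCellAddress, TInt aCellLength)
       //       {
       //       if (aType & RAllocatorHelper::EAllocationMask)
       //           *static_cast<TInt*>(aContext) += aCellLength;
       //       return ETrue; // ETrue means keep walking; EFalse stops the walk early
       //       }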
  1318 TInt RAllocatorHelper::TreeWalk(TUint32 aSlabRoot, TInt aSlabType, TWalkFunc3 aCallbackFn, TAny* aContext, TBool& shouldContinue)
  1319 	{
  1320 	const TSlabType type = (TSlabType)aSlabType;
  1321
  1322 	TUint32 s = 0;
  1323 	TInt err = ReadWord(aSlabRoot, s);
  1324 	if (err) return err;
  1325 	//slab* s = *root;
  1326 	if (!s)
  1327 		return KErrNone;
  1328
  1329 	for (;;)
  1330 		{
  1331 		//slab* c;
  1332 		//while ((c = s->iChild1) != 0)
  1333 		//	s = c;		// walk down left side to end
  1334 		TUint32 c;
  1335 		for(;;)
  1336 			{
  1337 			err = ReadWord(s + KSlabChild1Offset, c);
  1338 			if (err) return err;
  1339 			if (c == 0) break;
  1340 			else s = c;
  1341 			}
  1342 		for (;;)
  1343 			{
  1344 			//TODOf(s, i, wi);
  1345 			//TODO __HEAP_CORRUPTED_TEST_STATIC
  1346 			TUint32 h;
  1347 			err = ReadWord(s, h); // = aSlab->iHeader;
  1348 			if (err) return err;
  1349 			TUint32 size = (h&0x0003f000)>>12; //SlabHeaderSize(h);
       			if (size == 0) return KErrCorrupt; // A corrupt slab header would otherwise cause a division by zero below
  1350 			TUint debugheadersize = 0;
  1351 			if (iAllocatorType == EUdebHybridHeap) debugheadersize = 8;
  1352 			TUint32 usedCount = (((h&0x0ffc0000)>>18) + 4) / size; // (SlabHeaderUsedm4(h) + 4) / size;
  1353 			switch (type)
  1354 				{
  1355 				case ESlabFullInfo:
  1356 					{
  1357 					TUint32 count = usedCount;
  1358 					TUint32 i = 0;
  1359 					while ( i < count )
  1360 						{
  1361 						TUint32 addr = s + KSlabPayloadOffset + i*size; //&aSlab->iPayload[i*size];
  1362 						shouldContinue = (*aCallbackFn)(*this, aContext, ESlabAllocation, addr + debugheadersize, size - debugheadersize);
  1363 						if (!shouldContinue) return KErrNone;
  1364 						i++;
  1365 						}
  1366 					break;
  1367 					}
  1368 				case ESlabPartialInfo:
  1369 					{
  1370 					//TODO __HEAP_CORRUPTED_TEST_STATIC
  1371 					TUint32 count = KMaxSlabPayload / size;
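       					// Free cells within a slab form a singly-linked list encoded as byte offsets:
       					// the low byte of the header, shifted left two bits, is the offset of the first
       					// free cell from the slab base, and the first byte of each free cell links to
       					// the next in the same way (zero terminates the list).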
  1372 					TUint32 freeOffset = (h & 0xff) << 2;
  1373 					if (freeOffset == 0)
  1374 						{
  1375 						// TODO Shouldn't happen for a slab on the partial list
  1376 						}
  1377 					memset(iTempSlabBitmap, 1, KTempBitmapSize); // Everything defaults to in use
  1378 					TUint wildernessCount = count - usedCount;
  1379 					while (freeOffset)
  1380 						{
  1381 						wildernessCount--;
  1382 						TInt idx = (freeOffset-KSlabPayloadOffset)/size;
  1383 						LOG("iTempSlabBitmap freeOffset %d index %d", freeOffset, idx);
  1384 						iTempSlabBitmap[idx] = 0; // Mark it as free
  1385
  1386 						TUint32 addr = s + freeOffset;
  1387 						TUint8 nextCell = 0;
  1388 						err = ReadByte(addr, nextCell);
  1389 						if (err) return err;
  1390 						freeOffset = ((TUint32)nextCell) << 2;
  1391 						}
  1392 					memset(iTempSlabBitmap + count - wildernessCount, 0, wildernessCount); // Mark the wilderness as free
  1393 					for (TUint32 i = 0; i < count; i++)
  1394 						{
  1395 						TLinAddr addr = s + KSlabPayloadOffset + i*size;
  1396 						if (iTempSlabBitmap[i])
  1397 							{
  1398 							// In use
  1399 							shouldContinue = (*aCallbackFn)(*this, aContext, ESlabAllocation, addr + debugheadersize, size - debugheadersize);
  1400 							}
  1401 						else
  1402 							{
  1403 							// Free
  1404 							shouldContinue = (*aCallbackFn)(*this, aContext, ESlabFreeCell, addr, size);
  1405 							}
  1406 						if (!shouldContinue) return KErrNone;
  1407 						}
  1408 					break;
  1409 					}
  1410 				case ESlabEmptyInfo:
  1411 					{
  1412 					// Check which slabs of this page are empty
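       					// (Assuming the usual hybrid-heap geometry of four 1KB slabs per 4KB page:
       					// bits 8-11 of the page's first header word form a map of which slabs are unused.)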
  1413 					TUint32 pageAddr = ROUND_DOWN(s, KPageSize);
  1414 					TUint32 headerForPage = 0;
  1415 					err = ReadWord(pageAddr, headerForPage);
  1416 					if (err) return err;
  1417 					TUint32 slabHeaderPageMap = (headerForPage & 0x00000f00)>>8; // SlabHeaderPagemap(unsigned h)
  1418 					for (TInt slabIdx = 0; slabIdx < 4; slabIdx++)
  1419 						{
  1420 						if (slabHeaderPageMap & (1<<slabIdx))
  1421 							{
  1422 							TUint32 addr = pageAddr + SLABSIZE*slabIdx + KSlabPayloadOffset; //&aSlab->iPayload[i*size];
  1423 							shouldContinue = (*aCallbackFn)(*this, aContext, ESlabFreeSlab, addr, KMaxSlabPayload);
  1424 							if (!shouldContinue) return KErrNone;
  1425 							}
  1426 						}
  1427 					break;
  1428 					}
  1429 				}
  1430
  1431 			//c = s->iChild2;
  1432 			err = ReadWord(s + KSlabChild2Offset, c);
  1433 			if (err) return err;
  1434
  1435 			if (c)
  1436 				{	// one step down right side, now try and walk down left
  1437 				s = c;
  1438 				break;
  1439 				}
  1440 			for (;;)
  1441 				{	// loop to walk up right side
  1442 				TUint32 pp = 0;
  1443 				err = ReadWord(s + KSlabParentOffset, pp);
  1444 				if (err) return err;
  1445 				//slab** pp = s->iParent;
  1446 				if (pp == aSlabRoot)
  1447 					return KErrNone;
  1448 #define SlabFor(x) ROUND_DOWN(x, SLABSIZE)
  1449 				s = SlabFor(pp);
  1450 				//if (pp == &s->iChild1)
  1451 				if (pp == s + KSlabChild1Offset)
  1452 					break;
  1453 				}
  1454 			}
  1455 		}
  1456 	}
  1457
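       // For clarity, the pointer-form equivalent of the traversal above: an in-order walk that
       // follows parent links instead of using recursion or an explicit stack (sketch only; Visit
       // stands in for the per-slab switch):
       //
       //   slab* s = *root;
       //   while (s->iChild1) s = s->iChild1;       // start at the leftmost slab
       //   for (;;)
       //       {
       //       Visit(s);
       //       if (s->iChild2)
       //           { s = s->iChild2; while (s->iChild1) s = s->iChild1; }
       //       else for (;;)
       //           {
       //           slab** pp = s->iParent;
       //           if (pp == root) return;          // walked back up to the root: done
       //           s = SlabFor(pp);
       //           if (pp == &s->iChild1) break;    // came up a left edge: visit the parent next
       //           }
       //       }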
  1458 // Really should be called TotalSizeForCellType(...)
  1459 HUEXPORT_C TInt RAllocatorHelper::SizeForCellType(TExtendedCellType aType)
  1460 	{
  1461 	if (aType & EBadnessMask) return KErrArgument;
  1462 	if (aType == EAllocationMask) return AllocatedSize();
  1463
  1464 	if (iAllocatorType == EUdebOldRHeap || iAllocatorType == EUrelOldRHeap)
  1465 		{
  1466 		switch (aType)
  1467 			{
  1468 			case EHeapAllocation:
  1469 				return AllocatedSize();
  1470 			case EHeapFreeCell:
  1471 			case EFreeMask:
  1472 				return CommittedFreeSpace();
  1473 			default:
  1474 				return KErrNotSupported;
  1475 			}
  1476 		}
  1477 	else if (iAllocatorType == EUrelHybridHeap || iAllocatorType == EUdebHybridHeap)
  1478 		{
  1479 		TInt err = CheckValid(EHybridStats);
  1480 		if (err) return err;
  1481
  1482 		switch (aType)
  1483 			{
  1484 			case EHeapAllocation:
  1485 			case EHeapFreeCell:
  1486 				return KErrNotSupported;
  1487 			case EDlaAllocation:
  1488 				return iInfo->iDlaAllocsSize;
  1489 			case EPageAllocation:
  1490 				return iInfo->iPageAllocsSize;
  1491 			case ESlabAllocation:
  1492 				return iInfo->iSlabAllocsSize;
  1493 			case EDlaFreeCell:
  1494 				return iInfo->iDlaFreeSize;
  1495 			case ESlabFreeCell:
  1496 				return iInfo->iSlabFreeCellSize;
  1497 			case ESlabFreeSlab:
  1498 				return iInfo->iSlabFreeSlabSize;
  1499 			case EFreeMask:
  1500 				// Note this isn't the same as asking for CommittedFreeSpace(). SizeForCellType(EFreeMask) may include decommitted pages that lie inside a free cell
  1501 				return iInfo->iDlaFreeSize + iInfo->iSlabFreeCellSize + iInfo->iSlabFreeSlabSize;
  1502 			default:
  1503 				return KErrNotSupported;
  1504 			}
  1505 		}
  1506 	else
  1507 		{
  1508 		return KErrNotSupported;
  1509 		}
  1510 	}
  1511
  1512 HUEXPORT_C TInt RAllocatorHelper::CountForCellType(TExtendedCellType aType)
  1513 	{
  1514 	if (aType & EBadnessMask) return KErrArgument;
  1515 	if (aType == EAllocationMask) return AllocationCount();
  1516
  1517 	if (iAllocatorType == EUdebOldRHeap || iAllocatorType == EUrelOldRHeap)
  1518 		{
  1519 		switch (aType)
  1520 			{
  1521 			case EHeapAllocation:
  1522 				return AllocationCount();
  1523 			case EHeapFreeCell:
  1524 			case EFreeMask:
  1525 				{
  1526 				TInt err = CheckValid(ECommittedFreeSpace);
  1527 				if (err) return err;
  1528 				return iInfo->iHeapFreeCellCount;
  1529 				}
  1530 			default:
  1531 				return KErrNotSupported;
  1532 			}
  1533 		}
  1534 	else if (iAllocatorType == EUrelHybridHeap || iAllocatorType == EUdebHybridHeap)
  1535 		{
  1536 		TInt err = CheckValid(EHybridStats);
  1537 		if (err) return err;
  1538
  1539 		switch (aType)
  1540 			{
  1541 			case EHeapAllocation:
  1542 			case EHeapFreeCell:
  1543 				return KErrNotSupported;
  1544 			case EDlaAllocation:
  1545 				return iInfo->iDlaAllocsCount;
  1546 			case EPageAllocation:
  1547 				return iInfo->iPageAllocsCount;
  1548 			case ESlabAllocation:
  1549 				return iInfo->iSlabAllocsCount;
  1550 			case EDlaFreeCell:
  1551 				return iInfo->iDlaFreeCount;
  1552 			case ESlabFreeCell:
  1553 				return iInfo->iSlabFreeCellCount;
  1554 			case ESlabFreeSlab:
  1555 				return iInfo->iSlabFreeSlabCount;
  1556 			case EFreeMask:
  1557 				// This isn't a hugely meaningful value, but if that's what they asked for...
  1558 				return iInfo->iDlaFreeCount + iInfo->iSlabFreeCellCount + iInfo->iSlabFreeSlabCount;
  1559 			default:
  1560 				return KErrNotSupported;
  1561 			}
  1562 		}
  1563 	else
  1564 		{
  1565 		return KErrNotSupported;
  1566 		}
  1567 	}
  1568
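       // A usage sketch (the opened 'helper' here is illustrative only): for a hybrid heap, the
       // free space split across the sub-allocators could be gathered as
       //
       //   TInt freeBytes = helper.SizeForCellType(RAllocatorHelper::EFreeMask);
       //   TInt freeCells = helper.CountForCellType(RAllocatorHelper::EFreeMask);
       //   // Both calls return a negative error code (e.g. KErrNotSupported) on failure, so
       //   // check for < 0 before treating the result as a count or byte total.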
  1569 HUEXPORT_C TBool LtkUtils::RAllocatorHelper::AllocatorIsUdeb() const
  1570 	{
  1571 	return iAllocatorType == EUdebOldRHeap || iAllocatorType == EUdebHybridHeap;
  1572 	}
  1573
  1574
  1575 HUEXPORT_C const TDesC& LtkUtils::RAllocatorHelper::Description() const
  1576 	{
  1577 	_LIT(KRHeap, "RHeap");
  1578 	_LIT(KRHybridHeap, "RHybridHeap");
  1579 	_LIT(KUnknown, "Unknown");
  1580 	switch (iAllocatorType)
  1581 		{
  1582 		case EUrelOldRHeap:
  1583 		case EUdebOldRHeap:
  1584 			return KRHeap;
  1585 		case EUrelHybridHeap:
  1586 		case EUdebHybridHeap:
  1587 			return KRHybridHeap;
  1588 		case EAllocator:
  1589 		case EUnknown:
  1590 		default:
  1591 			return KUnknown;
  1592 		}
  1593 	}
  1594
  1595 #ifdef __KERNEL_MODE__
  1596
  1597 DChunk* LtkUtils::RAllocatorHelper::OpenUnderlyingChunk()
  1598 	{
  1599 	// Enter and leave in CS and with no locks held. On exit the returned DChunk has been Open()ed.
       	if (!iChunk) return NULL; // Not Open()ed yet, or the allocator had no identifiable chunk
  1600 	TInt err = iChunk->Open();
  1601 	if (err) return NULL;
  1602 	return iChunk;
  1603 	}
  1604
  1605 DChunk* LtkUtils::RKernelSideAllocatorHelper::OpenUnderlyingChunk()
  1606 	{
  1607 	if (iAllocatorType != EUrelOldRHeap && iAllocatorType != EUdebOldRHeap && iAllocatorType != EUrelHybridHeap && iAllocatorType != EUdebHybridHeap) return NULL;
  1608 	// Note RKernelSideAllocatorHelper doesn't use or access RAllocatorHelper::iChunk, because we figure out the chunk handle in a different way.
  1609 	// It is for this reason that iChunk is private, to remove temptation.
  1610
  1611 	// Enter and leave in CS and with no locks held. On exit the returned DChunk has been Open()ed.
  1612 	TUint32 chunkHandle = 0;
  1613 	TInt err = ReadData(iAllocatorAddress + _FOFF(RHackHeap, iChunkHandle), &chunkHandle, sizeof(TUint32));
  1614 	if (err) return NULL;
  1615
  1616 	NKern::LockSystem();
  1617 	DChunk* result = (DChunk*)Kern::ObjectFromHandle(iThread, chunkHandle, EChunk);
  1618 	if (result && result->Open() != KErrNone)
  1619 		{
  1620 		result = NULL;
  1621 		}
  1622 	NKern::UnlockSystem();
  1623 	return result;
  1624 	}
  1625
  1626 LtkUtils::RAllocatorHelper::TType LtkUtils::RAllocatorHelper::GetType() const
  1627 	{
  1628 	switch (iAllocatorType)
  1629 		{
  1630 		case EUrelOldRHeap:
  1631 		case EUdebOldRHeap:
  1632 			return ETypeRHeap;
  1633 		case EUrelHybridHeap:
  1634 		case EUdebHybridHeap:
  1635 			return ETypeRHybridHeap;
  1636 		case EAllocator:
  1637 		case EUnknown:
  1638 		default:
  1639 			return ETypeUnknown;
  1640 		}
  1641 	}
  1642
  1643 #else
  1644
  1645 TInt LtkUtils::RAllocatorHelper::EuserIsUdeb()
  1646 	{
  1647 	TAny* buf = User::Alloc(4096);
  1648 	if (!buf) return KErrNoMemory;
  1649 	RAllocator* dummyHeap = UserHeap::FixedHeap(buf, 4096, 4, ETrue);
  1650 	if (!dummyHeap)
       		{
       		User::Free(buf); // Don't leak the buffer on this (probably impossible) failure path
       		return KErrNoMemory;
       		}
  1651
  1652 	dummyHeap->__DbgSetAllocFail(RAllocator::EFailNext, 1);
  1653 	TAny* ptr = dummyHeap->Alloc(4);
  1654 	// Because we specified singleThreaded=ETrue we can allow dummyHeap to just go out of scope here
  1655 	User::Free(buf);
  1656
  1657 	if (ptr)
  1658 		{
  1659 		// Clearly the __DbgSetAllocFail had no effect so we must be urel
  1660 		// We don't need to free ptr because it came from the dummy heap
  1661 		return EFalse;
  1662 		}
  1663 	else
  1664 		{
  1665 		return ETrue;
  1666 		}
  1667 	}
  1668
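       // (The test above relies on __DbgSetAllocFail only being honoured by udeb builds of euser:
       // if the forced failure never materialises, the Alloc succeeds and euser must be urel.)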
  1669 #ifndef STANDALONE_ALLOCHELPER
  1670
  1671 #include <fshell/ltkutils.h>
  1672 HUEXPORT_C void LtkUtils::MakeHeapCellInvisible(TAny* aCell)
  1673 	{
  1674 	RAllocatorHelper helper;
  1675 	TInt err = helper.Open(&User::Allocator());
  1676 	if (err == KErrNone)
  1677 		{
  1678 		helper.SetCellNestingLevel(aCell, -1);
  1679 		helper.Close();
  1680 		}
  1681 	}
  1682 #endif // STANDALONE_ALLOCHELPER
  1683
  1684 #endif