perfsrv/memspy/Driver/Shared/heaputils.cpp
changeset 62:1c2bb2fc7c87
parent 52:c2f44e33b468
comparison: 56:aa2539c91954 -> 62:1c2bb2fc7c87
@@ -7,55 +7,71 @@
 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
 //
 // Initial Contributors:
 // Accenture - Initial contribution
 //
-#ifdef TEST_HYBRIDHEAP_ASSERTS
-#define private public
-#include <e32def.h>
-#include "slab.h"
-#include "page_alloc.h"
-#include "heap_hybrid.h"
-#endif
+// Contributors:
+// Adrian Issott (Nokia) - Updates for kernel-side alloc helper & RHybridHeap v2
+//
 
 #include "heaputils.h"
+#include "heapoffsets.h"
+
+enum THeapUtilsPanic
+    {
+    EUnsupportedAllocatorType,
+    EUserHeapOffsetRequestedForKernelHeap,
+    };
 
 #ifdef __KERNEL_MODE__
 
 #include <kern_priv.h>
 #define MEM Kern
 __ASSERT_COMPILE(sizeof(LtkUtils::RUserAllocatorHelper) == 10*4);
 #define KERN_ENTER_CS() NKern::ThreadEnterCS()
 #define KERN_LEAVE_CS() NKern::ThreadLeaveCS()
+#ifdef _DEBUG
+#define LOG(args...) Kern::Printf(args)
+#else
 #define LOG(args...)
+#endif
 #define HUEXPORT_C
-#else
+#define PANIC(r) Kern::Fault( "HeapUtils", (r) );
+
+#else // __KERNEL_MODE__
 
 #include <e32std.h>
 #define MEM User
 #define KERN_ENTER_CS()
 #define KERN_LEAVE_CS()
-//#include <e32debug.h>
-//#define LOG(args...) RDebug::Printf(args)
+#ifdef _DEBUG
+#include <e32debug.h>
+#define LOG(args...) RDebug::Printf(args)
+#else
 #define LOG(args...)
-
+#endif
 #ifdef STANDALONE_ALLOCHELPER
 #define HUEXPORT_C
 #else
 #define HUEXPORT_C EXPORT_C
 #endif
-
+#define PANIC(r) User::Panic( _L("HeapUtils"), (r) );
 #endif // __KERNEL_MODE__
 
 using LtkUtils::RAllocatorHelper;
+
+#ifndef TEST_HYBRIDHEAP_V2_ASSERTS
 const TUint KPageSize = 4096;
+#endif // TEST_HYBRIDHEAP_V2_ASSERTS
+
 __ASSERT_COMPILE(sizeof(RAllocatorHelper) == 9*4);
 
 // RAllocatorHelper
 
 HUEXPORT_C RAllocatorHelper::RAllocatorHelper()
-	: iAllocatorAddress(0), iAllocatorType(EUnknown), iInfo(NULL), iValidInfo(0), iTempSlabBitmap(NULL), iPageCache(NULL), iPageCacheAddr(0)
+	: iAllocatorAddress(0), iAllocatorType(EAllocatorNotSet), iInfo(NULL)
+	, iIsKernelHeapAllocator(EFalse), iTempSlabBitmap(NULL), iPageCache(NULL), iPageCacheAddr(0)
 #ifdef __KERNEL_MODE__
 	, iChunk(NULL)
 #endif
 	{
 	}
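heapoffsets.h is new with this change and its contents are not part of this diff. A minimal sketch of the shape the code below assumes: the namespace and constant names are exactly those used later in this file, the HybridV1 values are taken from the file-scope constants this changeset deletes further down, and everything else is a labelled placeholder.

// heapoffsets.h - SKETCH ONLY, not the real header shipped with this change.
// HeapV1/HybridV1 values come from the constants deleted from heaputils.cpp
// below; the HybridV2 values are not visible in this diff.
namespace HeapV1
    {
    const TInt KUserInitialHeapMetaDataSize = 32*4;   // the old "objsize <= 32*4" test
    }
namespace HybridV1
    {
    const TInt KMallocStateOffset = 34*4;             // was KMallocStateOffset
    const TInt KUserPageMapOffset = 141*4;            // was KPageMapOffset
    const TInt KUserPartialPageOffset = 165*4;        // was KPartialPageOffset
    const TInt KUserFullSlabOffset = 166*4;           // was KFullSlabOffset
    const TInt KUserSparePageOffset = 167*4;          // was KSparePageOffset
    const TInt KUserSlabAllocOffset = 172*4;          // was KSlabAllocOffset
    const TInt KUserInitialHeapMetaDataSize = 186*4;  // was KUserHybridHeapSize
    }
namespace HybridV2
    {
    // Shares its slot with HybridV1::KMallocStateOffset; only the refactored
    // heap stores a pointer to itself here (see IdentifyAllocatorType below).
    const TInt KSelfReferenceOffset = 34*4;
    // KUserPageMapOffset, KUserMallocStateOffset, KKernelMallocStateOffset,
    // KUserSparePageOffset, etc. are declared likewise; values not in this diff.
    }
namespace HybridCom // offsets common to both hybrid heap versions
    {
    const TInt KSlabsetSize = 4;
    const TInt KSlabParentOffset = 1*4;
    const TInt KSlabChild1Offset = 2*4;
    const TInt KSlabChild2Offset = 3*4;
    const TInt KSlabPayloadOffset = 4*4;
    const TInt KMallocStateTopSizeOffset = 3*4;
    const TInt KMallocStateTopOffset = 5*4;
    const TInt KMallocStateSegOffset = 105*4;
    const TInt KMaxSlabPayload = 1024 - KSlabPayloadOffset; // SLABSIZE == 1<<10
    }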
@@ -72,11 +88,12 @@
 
 		void ClearStats()
 			{
 			memclr(this, sizeof(THeapInfo));
 			}
 
+		TUint iValidInfo;
 		TInt iAllocatedSize; // number of bytes in allocated cells (excludes free cells, cell header overhead)
 		TInt iCommittedSize; // amount of memory actually committed (includes cell header overhead, gaps smaller than an MMU page)
 		TInt iAllocationCount; // number of allocations currently
 		TInt iMaxCommittedSize; // or thereabouts
 		TInt iMinCommittedSize;
@@ -121,10 +138,12 @@
 	return allocatorAddress;
 	}
 
 TInt RAllocatorHelper::OpenKernelHeap()
 	{
+	SetIsKernelHeapAllocator(ETrue);
+
 	_LIT(KName, "SvHeap");
 	NKern::ThreadEnterCS();
 	DObjectCon* chunkContainer = Kern::Containers()[EChunk];
 	chunkContainer->Wait();
 	const TInt chunkCount = chunkContainer->Count();
@@ -356,11 +375,13 @@
 	return err;
 	}
 
 LtkUtils::RKernelCopyAllocatorHelper::RKernelCopyAllocatorHelper()
     : iCopiedChunk(NULL), iOffset(0)
-    {}
+    {
+    SetIsKernelHeapAllocator(ETrue);
+    }
 
 TInt LtkUtils::RKernelCopyAllocatorHelper::OpenCopiedHeap(DChunk* aOriginalChunk, DChunk* aCopiedChunk, TInt aOffset)
     {
     TInt err = aCopiedChunk->Open();
     if (!err)
@@ -437,24 +458,30 @@
 	TInt udeb = EuserIsUdeb();
 	if (udeb < 0) return udeb; // error
 	TBool isTheKernelHeap = EFalse;
 #endif
 
+	if (iAllocatorAddress == 0)
+		{
+		// Subclasses with more knowledge about the layout of the allocator within the chunk may have already set the iAllocatorAddress (eg kernel heap's allocator doesn't start at the chunk base)
+		iAllocatorAddress = aChunkBase;
+		}
+
 	TInt err = IdentifyAllocatorType(udeb, isTheKernelHeap);
-	if (err == KErrNone && iAllocatorType == EAllocator)
+	if (err == KErrNone && iAllocatorType == EAllocatorUnknown)
 		{
 		// We've no reason to assume it's an allocator because we don't know the iAllocatorAddress actually is an RAllocator*
 		err = KErrNotFound;
 		}
-	if (err && aChunkMaxSize > 0)
+	if (err && aChunkMaxSize > 0 && iAllocatorAddress == aChunkBase)
 		{
 		TInt oldErr = err;
 		TAllocatorType oldType = iAllocatorType;
 		// Try middle of chunk, in case it's an RHybridHeap
 		iAllocatorAddress += aChunkMaxSize / 2;
 		err = IdentifyAllocatorType(udeb, isTheKernelHeap);
-		if (err || iAllocatorType == EAllocator)
+		if (err || iAllocatorType == EAllocatorUnknown)
 			{
 			// No better than before
 			iAllocatorAddress = aChunkBase;
 			iAllocatorType = oldType;
 			err = oldErr;
@@ -467,11 +494,11 @@
 		RAllocator* kernelAllocator = reinterpret_cast<RAllocator*>(iAllocatorAddress);
 		kernelAllocator->DebugFunction(RAllocator::ESetFail, (TAny*)9999, (TAny*)0); // Use an invalid fail reason - this should have no effect on the operation of the heap
 		TInt err = kernelAllocator->DebugFunction(7, NULL, NULL); // 7 is RAllocator::TAllocDebugOp::EGetFail
 		if (err == 9999)
 			{
-			// udeb new hybrid heap
+			// udeb hybrid heap (v1 or v2)
 			udeb = ETrue;
 			}
 		else if (err == KErrNotSupported)
 			{
 			// Old heap - fall back to slightly nasty non-thread-safe method
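The probe above is worth restating on its own: setting an impossible failure mode is harmless, only a UDEB allocator records it and hands it back from EGetFail, and a UREL build compiles those debug ops away and answers KErrNotSupported. A self-contained sketch (the helper name is hypothetical; the opcode value 7 is the one the code above relies on):

// Hypothetical helper restating the UDEB/UREL probe used above.
static TBool HeapIsUdeb(RAllocator& aAllocator)
	{
	// Harmless: 9999 is not a valid fail reason, so heap behaviour is unchanged
	aAllocator.DebugFunction(RAllocator::ESetFail, (TAny*)9999, (TAny*)0);
	// UDEB echoes the stored value back; UREL returns KErrNotSupported
	return aAllocator.DebugFunction(7, NULL, NULL) == 9999; // 7 == EGetFail
	}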
@@ -507,76 +534,19 @@
 	ECommittedFreeSpace = 32,
 	EMinSize = 64,
 	EHybridStats = 128,
 	};
 
-class RHackAllocator : public RAllocator
-	{
-public:
-	using RAllocator::iHandles;
-	using RAllocator::iTotalAllocSize;
-	using RAllocator::iCellCount;
-	};
-
-class RHackHeap : public RHeap
-	{
-public:
-	// Careful, only allowed to use things that are still in the new RHeap, and are still in the same place
-	using RHeap::iMaxLength;
-	using RHeap::iChunkHandle;
-	using RHeap::iLock;
-	using RHeap::iBase;
-	using RHeap::iAlign;
-	using RHeap::iTop;
-	};
-
-const TInt KChunkSizeOffset = 30*4;
-const TInt KPageMapOffset = 141*4;
-//const TInt KDlOnlyOffset = 33*4;
-const TInt KMallocStateOffset = 34*4;
-const TInt KMallocStateTopSizeOffset = 3*4;
-const TInt KMallocStateTopOffset = 5*4;
-const TInt KMallocStateSegOffset = 105*4;
-const TInt KUserHybridHeapSize = 186*4;
-const TInt KSparePageOffset = 167*4;
-const TInt KPartialPageOffset = 165*4;
-const TInt KFullSlabOffset = 166*4;
-const TInt KSlabAllocOffset = 172*4;
-const TInt KSlabParentOffset = 1*4;
-const TInt KSlabChild1Offset = 2*4;
-const TInt KSlabChild2Offset = 3*4;
-const TInt KSlabPayloadOffset = 4*4;
-const TInt KSlabsetSize = 4;
-
-#ifdef TEST_HYBRIDHEAP_ASSERTS
-__ASSERT_COMPILE(_FOFF(RHybridHeap, iChunkSize) == KChunkSizeOffset);
-__ASSERT_COMPILE(_FOFF(RHybridHeap, iPageMap) == KPageMapOffset);
-__ASSERT_COMPILE(_FOFF(RHybridHeap, iGlobalMallocState) == KMallocStateOffset);
-__ASSERT_COMPILE(sizeof(malloc_state) == 107*4);
-__ASSERT_COMPILE(_FOFF(malloc_state, iTopSize) == KMallocStateTopSizeOffset);
-__ASSERT_COMPILE(_FOFF(malloc_state, iTop) == KMallocStateTopOffset);
-__ASSERT_COMPILE(_FOFF(malloc_state, iSeg) == KMallocStateSegOffset);
-__ASSERT_COMPILE(sizeof(RHybridHeap) == KUserHybridHeapSize);
-__ASSERT_COMPILE(_FOFF(RHybridHeap, iSparePage) == KSparePageOffset);
-__ASSERT_COMPILE(_FOFF(RHybridHeap, iPartialPage) == KPartialPageOffset);
-__ASSERT_COMPILE(_FOFF(RHybridHeap, iSlabAlloc) == KSlabAllocOffset);
-__ASSERT_COMPILE(_FOFF(slab, iParent) == KSlabParentOffset);
-__ASSERT_COMPILE(_FOFF(slab, iChild1) == KSlabChild1Offset);
-__ASSERT_COMPILE(_FOFF(slab, iChild2) == KSlabChild2Offset);
-__ASSERT_COMPILE(_FOFF(slab, iPayload) == KSlabPayloadOffset);
-__ASSERT_COMPILE(sizeof(slabset) == KSlabsetSize);
-#endif
-
 TInt RAllocatorHelper::TryLock()
 	{
 #ifdef __KERNEL_MODE__
 	NKern::ThreadEnterCS();
 	DMutex* m = *(DMutex**)(iAllocatorAddress + _FOFF(RHackHeap, iLock));
 	if (m) Kern::MutexWait(*m);
 	return KErrNone;
 #else
-	if (iAllocatorType != EUnknown && iAllocatorType != EAllocator)
+	if (iAllocatorType != EAllocatorNotSet && iAllocatorType != EAllocatorUnknown)
 		{
 		RFastLock& lock = *reinterpret_cast<RFastLock*>(iAllocatorAddress + _FOFF(RHackHeap, iLock));
 		lock.Wait();
 		return KErrNone;
 		}
@@ -589,37 +559,38 @@
 #ifdef __KERNEL_MODE__
 	DMutex* m = *(DMutex**)(iAllocatorAddress + _FOFF(RHackHeap, iLock));
 	if (m) Kern::MutexSignal(*m);
 	NKern::ThreadLeaveCS();
 #else
-	if (iAllocatorType != EUnknown && iAllocatorType != EAllocator)
+	if (iAllocatorType != EAllocatorNotSet && iAllocatorType != EAllocatorUnknown)
 		{
 		RFastLock& lock = *reinterpret_cast<RFastLock*>(iAllocatorAddress + _FOFF(RHackHeap, iLock));
 		lock.Signal();
 		}
 #endif
 	}
 
 HUEXPORT_C void RAllocatorHelper::Close()
 	{
 	KERN_ENTER_CS();
-	iAllocatorType = EUnknown;
+	iAllocatorType = EAllocatorNotSet;
 	iAllocatorAddress = 0;
 	delete iInfo;
 	iInfo = NULL;
-	iValidInfo = 0;
 	MEM::Free(iTempSlabBitmap);
 	iTempSlabBitmap = NULL;
 	MEM::Free(iPageCache);
 	iPageCache = NULL;
 	iPageCacheAddr = 0;
+	SetIsKernelHeapAllocator(EFalse);
 	KERN_LEAVE_CS();
 	}
 
 TInt RAllocatorHelper::IdentifyAllocatorType(TBool aAllocatorIsUdeb, TBool aIsTheKernelHeap)
 	{
-	iAllocatorType = EUnknown;
+	iAllocatorType = EAllocatorNotSet;
+	SetIsKernelHeapAllocator(aIsTheKernelHeap);
 
 	TUint32 handlesPtr = 0;
 	TInt err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iHandles), handlesPtr);
 
 	if (err) return err;
@@ -630,55 +601,79 @@
 		// It's an RHeap of some kind - I doubt any other RAllocator subclass will use iHandles in this way
 		TUint32 base = 0;
 		err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), base);
 		if (err) return err;
 		TInt objsize = (TInt)base - (TInt)iAllocatorAddress;
-		if (objsize <= 32*4)
+
+		if (objsize <= HeapV1::KUserInitialHeapMetaDataSize)
 			{
 			// Old RHeap
 			iAllocatorType = aAllocatorIsUdeb ? EUdebOldRHeap : EUrelOldRHeap;
 			}
-		else
+		else if (objsize > HybridV2::KSelfReferenceOffset) // same value as HybridV1::KMallocStateOffset so will be true for RHybridHeap V1 and V2
 			{
-			// new hybrid heap - bigger than the old one. Likewise figure out if udeb or urel.
-			iAllocatorType = aAllocatorIsUdeb ? EUdebHybridHeap : EUrelHybridHeap;
-			}
+			// First and second versions of hybrid heap are bigger than the original RHeap
+			// But the user and kernel side versions have different sizes
+
+			TUint32 possibleSelfRef = 0; // in the new refactored RHybridHeap ...
+			err = ReadWord(iAllocatorAddress + HybridV2::KSelfReferenceOffset, possibleSelfRef);
+			if (err) return err;
+
+			// Only the second version references itself
+			if (possibleSelfRef == iAllocatorAddress)
+				{
+				iAllocatorType = aAllocatorIsUdeb ? EUdebHybridHeapV2 : EUrelHybridHeapV2;
+				}
+			else
+				{
+				iAllocatorType = aAllocatorIsUdeb ? EUdebHybridHeap : EUrelHybridHeap;
+				}
+			}
+		else
+			{
+			iAllocatorType = EAllocatorUnknown;
+			}
 		}
 	else
 		{
-		iAllocatorType = EAllocator;
+		iAllocatorType = EAllocatorUnknown;
 		}
+
+	LOG("RAllocatorHelper::IdentifyAllocatorType() - allocator at 0x%08x has type: %d", iAllocatorAddress, iAllocatorType);
+
 	return KErrNone;
 	}
 
 HUEXPORT_C TInt RAllocatorHelper::SetCellNestingLevel(TAny* aCell, TInt aNestingLevel)
 	{
 	TInt err = KErrNone;
 
 	switch (iAllocatorType)
 		{
+		// All of them are in the same place amazingly
 		case EUdebOldRHeap:
 		case EUdebHybridHeap:
-			// By this reckoning, they're in the same place amazingly
+		case EUdebHybridHeapV2:
 			{
 			TLinAddr nestingAddr = (TLinAddr)aCell - 8;
 			err = WriteWord(nestingAddr, aNestingLevel);
 			break;
 			}
 		default:
-			break;
+			return KErrNotSupported;
 		}
 	return err;
 	}
 
 HUEXPORT_C TInt RAllocatorHelper::GetCellNestingLevel(TAny* aCell, TInt& aNestingLevel)
 	{
 	switch (iAllocatorType)
 		{
+		// All of them are in the same place amazingly
 		case EUdebOldRHeap:
 		case EUdebHybridHeap:
-			// By this reckoning, they're in the same place amazingly
+		case EUdebHybridHeapV2:
 			{
 			TLinAddr nestingAddr = (TLinAddr)aCell - 8;
 			return ReadWord(nestingAddr, (TUint32&)aNestingLevel);
 			}
 		default:
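SetCellNestingLevel(), GetCellNestingLevel() and AllocCountForCell() further down all poke the same fixed offsets in front of the returned cell pointer. A sketch of the UDEB debug cell header those offsets imply (field names illustrative, not taken from the real RHeap headers):

// SKETCH: layout implied by the -8/-4 offsets used in this file for UDEB heaps
struct TDebugCellHeader
	{
	TInt iNestingLevel;   // at (TLinAddr)aCell - 8, read/written above
	TUint32 iAllocCount;  // at (TLinAddr)aCell - 4, read by AllocCountForCell()
	};
__ASSERT_COMPILE(sizeof(TDebugCellHeader) == 8);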
@@ -723,56 +718,58 @@
 				err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iTop), top);
 				if (err) return err;
 
 				//iInfo->iCommittedSize = top - base;
 				iInfo->iCommittedSize = top - iAllocatorAddress;
-				iValidInfo |= ECommitted;
+				iInfo->iValidInfo |= ECommitted;
 				}
 			if (aMask & EAllocated)
 				{
 				TUint32 allocSize = 0;
 				err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iTotalAllocSize), allocSize);
 				if (err) return err;
 				iInfo->iAllocatedSize = allocSize;
-				iValidInfo |= EAllocated;
+				iInfo->iValidInfo |= EAllocated;
 				}
 			if (aMask & ECount)
 				{
 				TUint32 count = 0;
 				err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iCellCount), count);
 				if (err) return err;
 				iInfo->iAllocationCount = count;
-				iValidInfo |= ECount;
+				iInfo->iValidInfo |= ECount;
 				}
 			if (aMask & EMaxSize)
 				{
 				TUint32 maxlen = 0;
 				err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iMaxLength), maxlen);
 				if (err) return err;
 				iInfo->iMaxCommittedSize = maxlen;
-				iValidInfo |= EMaxSize;
+				iInfo->iValidInfo |= EMaxSize;
 				}
 			if (aMask & EMinSize)
 				{
 				TUint32 minlen = 0;
 				err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iMaxLength) - 4, minlen); // This isn't a typo! iMinLength is 4 bytes before iMaxLength, on old heap ONLY
 				if (err) return err;
 				iInfo->iMinCommittedSize = minlen;
-				iValidInfo |= EMinSize;
+				iInfo->iValidInfo |= EMinSize;
 				}
 			if (aMask & KHeapWalkStatsForOldHeap)
 				{
 				// Need a heap walk
 				iInfo->ClearStats();
-				iValidInfo = 0;
+				iInfo->iValidInfo = 0;
 				err = DoWalk(&WalkForStats, NULL);
-				if (err == KErrNone) iValidInfo |= KHeapWalkStatsForOldHeap;
+				if (err == KErrNone) iInfo->iValidInfo |= KHeapWalkStatsForOldHeap;
 				}
 			return err;
 			}
 		case EUrelHybridHeap:
 		case EUdebHybridHeap:
+		case EUrelHybridHeapV2:
+		case EUdebHybridHeapV2:
 			{
 			TBool needWalk = EFalse;
 			if (aMask & ECommitted)
 				{
 				// RAllocator::Size uses iChunkSize - sizeof(RHybridHeap);
@@ -783,28 +780,28 @@
 				if (err) return err;
 				//TUint32 baseAddr = 0;
 				//err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), baseAddr);
 				//if (err) return err;
 				iInfo->iCommittedSize = chunkSize; // - (baseAddr - iAllocatorAddress);
-				iValidInfo |= ECommitted;
+				iInfo->iValidInfo |= ECommitted;
 				}
 			if (aMask & (EAllocated|ECount))
 				{
 				if (iAllocatorType == EUdebHybridHeap)
 					{
 					// Easy, just get them from the counter
 					TUint32 totalAlloc = 0;
 					err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iTotalAllocSize), totalAlloc);
 					if (err) return err;
 					iInfo->iAllocatedSize = totalAlloc;
-					iValidInfo |= EAllocated;
+					iInfo->iValidInfo |= EAllocated;
 
 					TUint32 cellCount = 0;
 					err = ReadWord(iAllocatorAddress + _FOFF(RHackAllocator, iCellCount), cellCount);
 					if (err) return err;
 					iInfo->iAllocationCount = cellCount;
-					iValidInfo |= ECount;
+					iInfo->iValidInfo |= ECount;
 					}
 				else
 					{
 					// A heap walk is needed
 					needWalk = ETrue;
@@ -814,43 +811,43 @@
 				{
 				TUint32 maxlen = 0;
 				err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iMaxLength), maxlen);
 				if (err) return err;
 				iInfo->iMaxCommittedSize = maxlen;
-				iValidInfo |= EMaxSize;
+				iInfo->iValidInfo |= EMaxSize;
 				}
 			if (aMask & EMinSize)
 				{
 				TUint32 minlen = 0;
 				err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iAlign) + 4*4, minlen); // iMinLength is in different place to old RHeap
 				if (err) return err;
 				iInfo->iMinCommittedSize = minlen;
-				iValidInfo |= EMinSize;
+				iInfo->iValidInfo |= EMinSize;
 				}
 			if (aMask & (EUnusedPages|ECommittedFreeSpace|EHybridStats))
 				{
 				// EAllocated and ECount have already been taken care of above
 				needWalk = ETrue;
 				}
 
 			if (needWalk)
 				{
 				iInfo->ClearStats();
-				iValidInfo = 0;
+				iInfo->iValidInfo = 0;
 				err = DoWalk(&WalkForStats, NULL);
-				if (err == KErrNone) iValidInfo |= KHeapWalkStatsForNewHeap;
+				if (err == KErrNone) iInfo->iValidInfo |= KHeapWalkStatsForNewHeap;
 				}
 			return err;
 			}
 		default:
 			return KErrNotSupported;
 		}
 	}
 
 TInt RAllocatorHelper::CheckValid(TUint aMask)
 	{
-	if ((iValidInfo & aMask) == aMask)
+	if ((iInfo->iValidInfo & aMask) == aMask)
 		{
 		return KErrNone;
 		}
 	else
 		{
@@ -879,11 +876,11 @@
 	return iInfo->iAllocationCount;
 	}
 
 HUEXPORT_C TInt RAllocatorHelper::RefreshDetails()
 	{
-	return RefreshDetails(iValidInfo);
+	return RefreshDetails(iInfo->iValidInfo);
 	}
 
 HUEXPORT_C TInt RAllocatorHelper::MaxCommittedSize()
 	{
 	TInt err = CheckValid(EMaxSize);
@@ -901,22 +898,180 @@
 HUEXPORT_C TInt RAllocatorHelper::AllocCountForCell(TAny* aCell) const
 	{
 	TUint32 allocCount = 0;
 	switch (iAllocatorType)
 		{
+		// All of them are in the same place amazingly
 		case EUdebOldRHeap:
-		case EUdebHybridHeap: // Both are in the same place, amazingly
+		case EUdebHybridHeap:
+		case EUdebHybridHeapV2:
 			{
 			TLinAddr allocCountAddr = (TLinAddr)aCell - 4;
 			TInt err = ReadWord(allocCountAddr, allocCount);
 			if (err) return err;
 			return (TInt)allocCount;
 			}
 		default:
 			return KErrNotSupported;
 		}
 	}
+
+//
+
+void RAllocatorHelper::SetIsKernelHeapAllocator(TBool aIsKernelHeapAllocator)
+    {
+    iIsKernelHeapAllocator = aIsKernelHeapAllocator;
+    }
+
+TBool RAllocatorHelper::GetIsKernelHeapAllocator() const
+    {
+    return iIsKernelHeapAllocator;
+    }
+
+TInt RAllocatorHelper::PageMapOffset() const
+    {
+    if (GetIsKernelHeapAllocator())
+        {
+        PANIC(EUserHeapOffsetRequestedForKernelHeap);
+        }
+
+    switch (iAllocatorType)
+        {
+        case EUrelHybridHeap:
+        case EUdebHybridHeap:
+            return HybridV1::KUserPageMapOffset;
+        case EUrelHybridHeapV2:
+        case EUdebHybridHeapV2:
+            return HybridV2::KUserPageMapOffset;
+        default:
+            PANIC(EUnsupportedAllocatorType);
+            return KErrNotSupported; // only needed to make the compiler happy
+        }
+    }
+
+TInt RAllocatorHelper::MallocStateOffset() const
+    {
+    switch (iAllocatorType)
+        {
+        case EUrelHybridHeap:
+        case EUdebHybridHeap:
+            return HybridV1::KMallocStateOffset;
+        case EUrelHybridHeapV2:
+        case EUdebHybridHeapV2:
+            if (GetIsKernelHeapAllocator())
+                {
+                return HybridV2::KKernelMallocStateOffset;
+                }
+            else
+                {
+                return HybridV2::KUserMallocStateOffset;
+                }
+        default:
+            PANIC(EUnsupportedAllocatorType);
+            return KErrNotSupported; // only needed to make the compiler happy
+        }
+    }
+
+TInt RAllocatorHelper::SparePageOffset() const
+    {
+    if (GetIsKernelHeapAllocator())
+        {
+        PANIC(EUserHeapOffsetRequestedForKernelHeap);
+        }
+
+    switch (iAllocatorType)
+        {
+        case EUrelHybridHeap:
+        case EUdebHybridHeap:
+            return HybridV1::KUserSparePageOffset;
+        case EUrelHybridHeapV2:
+        case EUdebHybridHeapV2:
+            return HybridV2::KUserSparePageOffset;
+        default:
+            PANIC(EUnsupportedAllocatorType);
+            return KErrNotSupported; // only needed to make the compiler happy
+        }
+    }
+
+TInt RAllocatorHelper::PartialPageOffset() const
+    {
+    if (GetIsKernelHeapAllocator())
+        {
+        PANIC(EUserHeapOffsetRequestedForKernelHeap);
+        }
+
+    switch (iAllocatorType)
+        {
+        case EUrelHybridHeap:
+        case EUdebHybridHeap:
+            return HybridV1::KUserPartialPageOffset;
+        case EUrelHybridHeapV2:
+        case EUdebHybridHeapV2:
+            return HybridV2::KUserPartialPageOffset;
+        default:
+            PANIC(EUnsupportedAllocatorType);
+            return KErrNotSupported; // only needed to make the compiler happy
+        }
+    }
+
+TInt RAllocatorHelper::FullSlabOffset() const
+    {
+    if (GetIsKernelHeapAllocator())
+        {
+        PANIC(EUserHeapOffsetRequestedForKernelHeap);
+        }
+
+    switch (iAllocatorType)
+        {
+        case EUrelHybridHeap:
+        case EUdebHybridHeap:
+            return HybridV1::KUserFullSlabOffset;
+        case EUrelHybridHeapV2:
+        case EUdebHybridHeapV2:
+            return HybridV2::KUserFullSlabOffset;
+        default:
+            PANIC(EUnsupportedAllocatorType);
+            return KErrNotSupported; // only needed to make the compiler happy
+        }
+    }
+
+TInt RAllocatorHelper::SlabAllocOffset() const
+    {
+    if (GetIsKernelHeapAllocator())
+        {
+        PANIC(EUserHeapOffsetRequestedForKernelHeap);
+        }
+
+    switch (iAllocatorType)
+        {
+        case EUrelHybridHeap:
+        case EUdebHybridHeap:
+            return HybridV1::KUserSlabAllocOffset;
+        case EUrelHybridHeapV2:
+        case EUdebHybridHeapV2:
+            return HybridV2::KUserSlabAllocOffset;
+        default:
+            PANIC(EUnsupportedAllocatorType);
+            return KErrNotSupported; // only needed to make the compiler happy
+        }
+    }
+
+TInt RAllocatorHelper::UserInitialHeapMetaDataSize() const
+    {
+    switch (iAllocatorType)
+        {
+        case EUrelHybridHeap:
+        case EUdebHybridHeap:
+            return HybridV1::KUserInitialHeapMetaDataSize;
+        case EUrelHybridHeapV2:
+        case EUdebHybridHeapV2:
+            return HybridV2::KUserInitialHeapMetaDataSize;
+        default:
+            PANIC(EUnsupportedAllocatorType);
+            return KErrNotSupported; // only needed to make the compiler happy
+        }
+    }
 
 struct SContext3
 	{
 	RAllocatorHelper::TWalkFunc3 iOrigWalkFn;
 	TAny* iOrigContext;
@@ -952,10 +1107,12 @@
 		case EUrelOldRHeap:
 			err = OldSkoolWalk(aCallbackFn, aContext);
 			break;
 		case EUrelHybridHeap:
 		case EUdebHybridHeap:
+		case EUrelHybridHeapV2:
+		case EUdebHybridHeapV2:
 			err = NewHotnessWalk(aCallbackFn, aContext);
 			break;
 		default:
 			err = KErrNotSupported;
 			break;
@@ -1187,11 +1344,11 @@
 
 TUint RAllocatorHelper::PageMapOperatorBrackets(unsigned ix, TInt& err) const
 	{
 	//return 1U&(iBase[ix>>3] >> (ix&7));
 	TUint32 basePtr = 0;
-	err = ReadWord(iAllocatorAddress + KPageMapOffset, basePtr);
+	err = ReadWord(iAllocatorAddress + PageMapOffset(), basePtr);
 	if (err) return 0;
 
 	TUint8 res = 0;
 	err = ReadByte(basePtr + (ix >> 3), res);
 	if (err) return 0;
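The commented-out first line preserves the original in-process expression; the two remote reads reproduce it a byte at a time. For reference, the local form of the lookup (hypothetical helper, same arithmetic):

// Bit ix of the page map lives in byte ix>>3, at bit position ix&7
static inline TUint PageMapBit(const TUint8* aBase, unsigned ix)
	{
	return 1U & (aBase[ix >> 3] >> (ix & 7));
	}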
@@ -1201,11 +1358,11 @@
 
 
 TInt RAllocatorHelper::PageMapFind(TUint start, TUint bit, TInt& err)
 	{
 	TUint32 iNbits = 0;
-	err = ReadWord(iAllocatorAddress + KPageMapOffset + 4, iNbits);
+	err = ReadWord(iAllocatorAddress + PageMapOffset() + 4, iNbits);
 	if (err) return 0;
 
 	if (start<iNbits) do
 		{
 		//if ((*this)[start]==bit)
@@ -1250,34 +1407,27 @@
 	return val;
 	}
 
 enum TSlabType { ESlabFullInfo, ESlabPartialInfo, ESlabEmptyInfo };
 
-#ifndef TEST_HYBRIDHEAP_ASSERTS
-#define MAXSLABSIZE		56
-#define	SLABSHIFT		10
-#define	SLABSIZE		(1 << SLABSHIFT)
-const TInt KMaxSlabPayload = SLABSIZE - KSlabPayloadOffset;
-#endif
-
 TInt RAllocatorHelper::NewHotnessWalk(TWalkFunc3 aCallbackFn, TAny* aContext)
 	{
 	// RHybridHeap does paged, slab then DLA, so that's what we do too
 	// Remember Kernel RHybridHeaps don't even have the page and slab members
 
 	TUint32 basePtr;
 	TInt err = ReadWord(iAllocatorAddress + _FOFF(RHackHeap, iBase), basePtr);
 	if (err) return err;
-	if (basePtr < iAllocatorAddress + KUserHybridHeapSize)
+	if (basePtr < iAllocatorAddress + UserInitialHeapMetaDataSize())
 		{
 		// Must be a kernel one - don't do page and slab
 		}
 	else
 		{
 		// Paged
 		TUint32 membase = 0;
-		err = ReadWord(iAllocatorAddress + KPageMapOffset + 8, membase);
+		err = ReadWord(iAllocatorAddress + PageMapOffset() + 8, membase);
 		if (err) return err;
 
 		TBool shouldContinue = ETrue;
 		for (int ix = 0;(ix = PageMapFind(ix,1,err)) >= 0 && err == KErrNone;)
 			{
@@ -1300,11 +1450,11 @@
 			}
 		if (err) return err;
 
 		// Slab
 		TUint32 sparePage = 0;
-		err = ReadWord(iAllocatorAddress + KSparePageOffset, sparePage);
+		err = ReadWord(iAllocatorAddress + SparePageOffset(), sparePage);
 		if (err) return err;
 		if (sparePage)
 			{
 			//Walk(wi, iSparePage, iPageSize, EGoodFreeCell, ESlabSpare); // Introduce Slab spare page to the walk function
 			// This counts as 4 spare slabs
@@ -1314,21 +1464,21 @@
 				if (!shouldContinue) return KErrNone;
 				}
 			}
 
 		//TreeWalk(&iFullSlab, &SlabFullInfo, i, wi);
-		TInt err = TreeWalk(iAllocatorAddress + KFullSlabOffset, ESlabFullInfo, aCallbackFn, aContext, shouldContinue);
+		TInt err = TreeWalk(iAllocatorAddress + FullSlabOffset(), ESlabFullInfo, aCallbackFn, aContext, shouldContinue);
 		if (err || !shouldContinue) return err;
 		for (int ix = 0; ix < (MAXSLABSIZE>>2); ++ix)
 			{
-			TUint32 partialAddr = iAllocatorAddress + KSlabAllocOffset + ix*KSlabsetSize;
+			TUint32 partialAddr = iAllocatorAddress + SlabAllocOffset() + ix*HybridCom::KSlabsetSize;
 			//TreeWalk(&iSlabAlloc[ix].iPartial, &SlabPartialInfo, i, wi);
 			err = TreeWalk(partialAddr, ESlabPartialInfo, aCallbackFn, aContext, shouldContinue);
 			if (err || !shouldContinue) return err;
 			}
 		//TreeWalk(&iPartialPage, &SlabEmptyInfo, i, wi);
-		TreeWalk(iAllocatorAddress + KPartialPageOffset, ESlabEmptyInfo, aCallbackFn, aContext, shouldContinue);
+		TreeWalk(iAllocatorAddress + PartialPageOffset(), ESlabEmptyInfo, aCallbackFn, aContext, shouldContinue);
 		}
 
 	// DLA
 #define CHUNK_OVERHEAD (sizeof(TUint))
 #define CHUNK_ALIGN_MASK (7)
@@ -1341,26 +1491,26 @@
 #define ALIGN_AS_CHUNK(A)   ((A) + ALIGN_OFFSET(CHUNK2MEM(A)))
 #define CINUSE_BIT 2
 #define INUSE_BITS 3
 
 	TUint32 topSize = 0;
-	err = ReadWord(iAllocatorAddress + KMallocStateOffset + KMallocStateTopSizeOffset, topSize);
+	err = ReadWord(iAllocatorAddress + MallocStateOffset() + HybridCom::KMallocStateTopSizeOffset, topSize);
 	if (err) return err;
 
 	TUint32 top = 0;
-	err = ReadWord(iAllocatorAddress + KMallocStateOffset + KMallocStateTopOffset, top);
+	err = ReadWord(iAllocatorAddress + MallocStateOffset() + HybridCom::KMallocStateTopOffset, top);
 	if (err) return err;
 
 	TInt max = ((topSize-1) & ~CHUNK_ALIGN_MASK) - CHUNK_OVERHEAD;
 	if ( max < 0 )
 		max = 0;
 
 	TBool shouldContinue = (*aCallbackFn)(*this, aContext, EDlaFreeCell, top, max);
 	if (!shouldContinue) return KErrNone;
 
 	TUint32 mallocStateSegBase = 0;
-	err = ReadWord(iAllocatorAddress + KMallocStateOffset + KMallocStateSegOffset, mallocStateSegBase);
+	err = ReadWord(iAllocatorAddress + MallocStateOffset() + HybridCom::KMallocStateSegOffset, mallocStateSegBase);
 	if (err) return err;
 
 	for (TLinAddr q = ALIGN_AS_CHUNK(mallocStateSegBase); q != top; /*q = NEXT_CHUNK(q)*/)
 		{
 		TUint32 qhead = 0;
@@ -1410,11 +1560,11 @@
 		//while ((c = s->iChild1) != 0)
 		//	s = c;		// walk down left side to end
 		TUint32 c;
 		for(;;)
 			{
-			err = ReadWord(s + KSlabChild1Offset, c);
+			err = ReadWord(s + HybridCom::KSlabChild1Offset, c);
 			if (err) return err;
 			if (c == 0) break;
 			else s = c;
 			}
 		for (;;)
@@ -1434,32 +1584,32 @@
 					{
 					TUint32 count = usedCount;
 					TUint32 i = 0;
 					while ( i < count )
 						{
-						TUint32 addr = s + KSlabPayloadOffset + i*size; //&aSlab->iPayload[i*size];
+						TUint32 addr = s + HybridCom::KSlabPayloadOffset + i*size; //&aSlab->iPayload[i*size];
 						shouldContinue = (*aCallbackFn)(*this, aContext, ESlabAllocation, addr + debugheadersize, size - debugheadersize);
 						if (!shouldContinue) return KErrNone;
 						i++;
 						}
 					break;
 					}
 				case ESlabPartialInfo:
 					{
 					//TODO __HEAP_CORRUPTED_TEST_STATIC
-					TUint32 count = KMaxSlabPayload / size;
+					TUint32 count = HybridCom::KMaxSlabPayload / size;
 					TUint32 freeOffset = (h & 0xff) << 2;
 					if (freeOffset == 0)
 						{
 						// TODO Shouldn't happen for a slab on the partial list
 						}
 					memset(iTempSlabBitmap, 1, KTempBitmapSize); // Everything defaults to in use
 					TUint wildernessCount = count - usedCount;
 					while (freeOffset)
 						{
 						wildernessCount--;
-						TInt idx = (freeOffset-KSlabPayloadOffset)/size;
+						TInt idx = (freeOffset - HybridCom::KSlabPayloadOffset) / size;
 						LOG("iTempSlabBitmap freeOffset %d index %d", freeOffset, idx);
 						iTempSlabBitmap[idx] = 0; // Mark it as free
 
 						TUint32 addr = s + freeOffset;
 						TUint8 nextCell = 0;
@@ -1468,11 +1618,11 @@
 						freeOffset = ((TUint32)nextCell) << 2;
 						}
 					memset(iTempSlabBitmap + count - wildernessCount, 0, wildernessCount); // Mark the wilderness as free
 					for (TInt i = 0; i < count; i++)
 						{
-						TLinAddr addr = s + KSlabPayloadOffset + i*size;
+						TLinAddr addr = s + HybridCom::KSlabPayloadOffset + i*size;
 						if (iTempSlabBitmap[i])
 							{
 							// In use
 							shouldContinue = (*aCallbackFn)(*this, aContext, ESlabAllocation, addr + debugheadersize, size - debugheadersize);
 							}
@@ -1495,40 +1645,40 @@
 					TUint32 slabHeaderPageMap = (headerForPage & 0x00000f00)>>8; // SlabHeaderPagemap(unsigned h)
 					for (TInt slabIdx = 0; slabIdx < 4; slabIdx++)
 						{
 						if (slabHeaderPageMap & (1<<slabIdx))
 							{
-							TUint32 addr = pageAddr + SLABSIZE*slabIdx + KSlabPayloadOffset; //&aSlab->iPayload[i*size];
-							shouldContinue = (*aCallbackFn)(*this, aContext, ESlabFreeSlab, addr, KMaxSlabPayload);
+							TUint32 addr = pageAddr + SLABSIZE*slabIdx + HybridCom::KSlabPayloadOffset; //&aSlab->iPayload[i*size];
+							shouldContinue = (*aCallbackFn)(*this, aContext, ESlabFreeSlab, addr, HybridCom::KMaxSlabPayload);
 							if (!shouldContinue) return KErrNone;
 							}
 						}
 					break;
 					}
 				}
 
 			//c = s->iChild2;
-			err = ReadWord(s + KSlabChild2Offset, c);
+			err = ReadWord(s + HybridCom::KSlabChild2Offset, c);
 			if (err) return err;
 
 			if (c)
 				{	// one step down right side, now try and walk down left
 				s = c;
 				break;
 				}
 			for (;;)
 				{	// loop to walk up right side
 				TUint32 pp = 0;
-				err = ReadWord(s + KSlabParentOffset, pp);
+				err = ReadWord(s + HybridCom::KSlabParentOffset, pp);
 				if (err) return err;
 				//slab** pp = s->iParent;
 				if (pp == aSlabRoot)
 					return KErrNone;
 #define SlabFor(x) ROUND_DOWN(x, SLABSIZE)
 				s = SlabFor(pp);
 				//if (pp == &s->iChild1)
-				if (pp == s + KSlabChild1Offset)
+				if (pp == s + HybridCom::KSlabChild1Offset)
 					break;
 				}
 			}
 		}
 	}
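TreeWalk above is a word-by-word remote replay of the slab tree traversal in the original allocator sources (the commented-out lines). A local-memory sketch of the same traversal may make the parent/child pointer chasing easier to follow; the slab struct and SlabFor() are assumptions modelled on the offsets used here (header at +0, iParent at +4, iChild1 at +8, iChild2 at +12, SLABSIZE == 1024 as in the defines this change removes):

// SKETCH: local equivalent of the remote traversal above
const TInt SLABSIZE = 1024; // 1 << SLABSHIFT

struct slab
	{
	TUint32 iHeader;    // +0: packed usedCount / free-list head ("h" above)
	slab** iParent;     // +4:  HybridCom::KSlabParentOffset
	slab* iChild1;      // +8:  HybridCom::KSlabChild1Offset
	slab* iChild2;      // +12: HybridCom::KSlabChild2Offset
	};

static slab* SlabFor(slab** aPtr) // round down to the owning slab's base
	{
	return (slab*)((TLinAddr)aPtr & ~(TLinAddr)(SLABSIZE - 1));
	}

static void TreeWalkLocal(slab** aRoot, void (*aVisit)(slab*))
	{
	slab* s = *aRoot;
	if (!s) return;
	for (;;)
		{
		slab* c;
		while ((c = s->iChild1) != 0) s = c; // walk down left side to end
		for (;;)
			{
			aVisit(s); // the switch on TSlabType in the real code
			c = s->iChild2;
			if (c) { s = c; break; } // one step down right side, then down left again
			for (;;)
				{ // loop to walk up right side
				slab** pp = s->iParent;
				if (pp == aRoot) return;
				s = SlabFor(pp);
				if (pp == &s->iChild1) break;
				}
			}
		}
	}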
@@ -1644,29 +1794,33 @@
 		}
 	}
 
 HUEXPORT_C TBool LtkUtils::RAllocatorHelper::AllocatorIsUdeb() const
 	{
-	return iAllocatorType == EUdebOldRHeap || iAllocatorType == EUdebHybridHeap;
+	return iAllocatorType == EUdebOldRHeap || iAllocatorType == EUdebHybridHeap || iAllocatorType == EUdebHybridHeapV2;
 	}
 
 
 HUEXPORT_C const TDesC& LtkUtils::RAllocatorHelper::Description() const
 	{
 	_LIT(KRHeap, "RHeap");
 	_LIT(KRHybridHeap, "RHybridHeap");
+	_LIT(KRHybridHeapRefactored, "RHybridHeap (Refactored)");
 	_LIT(KUnknown, "Unknown");
 	switch (iAllocatorType)
 		{
 		case EUrelOldRHeap:
 		case EUdebOldRHeap:
 			return KRHeap;
 		case EUrelHybridHeap:
 		case EUdebHybridHeap:
 			return KRHybridHeap;
-		case EAllocator:
-		case EUnknown:
+		case EUrelHybridHeapV2:
+		case EUdebHybridHeapV2:
+			return KRHybridHeapRefactored;
+		case EAllocatorUnknown:
+		case EAllocatorNotSet:
 		default:
 			return KUnknown;
 		}
 	}
 
@@ -1680,12 +1834,18 @@
 	return iChunk;
 	}
 
 DChunk* LtkUtils::RUserAllocatorHelper::OpenUnderlyingChunk()
 	{
-	if (iAllocatorType != EUrelOldRHeap && iAllocatorType != EUdebOldRHeap && iAllocatorType != EUrelHybridHeap && iAllocatorType != EUdebHybridHeap) return NULL;
-	// Note RKernelSideAllocatorHelper doesn't use or access RAllocatorHelper::iChunk, because we figure out the chunk handle in a different way.
+	if (iAllocatorType != EUrelOldRHeap && iAllocatorType != EUdebOldRHeap &&
+	    iAllocatorType != EUrelHybridHeap && iAllocatorType != EUdebHybridHeap &&
+	    iAllocatorType != EUrelHybridHeapV2 && iAllocatorType != EUdebHybridHeapV2)
+	    {
+	    return NULL;
+	    }
+
+	// Note RUserAllocatorHelper doesn't use or access RAllocatorHelper::iChunk, because we figure out the chunk handle in a different way.
 	// It is for this reason that iChunk is private, to remove temptation
 
 	// Enter and leave in CS and with no locks held. On exit the returned DChunk has been Open()ed.
 	TUint32 chunkHandle = 0;
 	TInt err = ReadData(iAllocatorAddress + _FOFF(RHackHeap, iChunkHandle), &chunkHandle, sizeof(TUint32));
@@ -1709,12 +1869,15 @@
 		case EUdebOldRHeap:
 			return ETypeRHeap;
 		case EUrelHybridHeap:
 		case EUdebHybridHeap:
 			return ETypeRHybridHeap;
-		case EAllocator:
-		case EUnknown:
+		case EUrelHybridHeapV2:
+		case EUdebHybridHeapV2:
+			return ETypeRHybridHeapV2;
+		case EAllocatorUnknown:
+		case EAllocatorNotSet:
 		default:
 			return ETypeUnknown;
 		}
 	}
 
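Pulling the pieces together, a typical user-side client of the helper would look roughly like this. Only RefreshDetails(), Description(), AllocatorIsUdeb() and Close() are visible in this file; the Open() overload and the AllocationCount() getter shown are assumptions about the declarations in heaputils.h:

// SKETCH: hypothetical usage of LtkUtils::RAllocatorHelper from user code
#include "heaputils.h"

void DumpCurrentHeap()
	{
	LtkUtils::RAllocatorHelper helper;
	TInt err = helper.Open(&User::Allocator()); // Open() overload assumed
	if (err != KErrNone) return;
	if (helper.RefreshDetails() == KErrNone) // re-reads the currently valid stats
		{
		RDebug::Print(_L("%S udeb=%d count=%d"), &helper.Description(),
		              helper.AllocatorIsUdeb(), helper.AllocationCount()); // AllocationCount() assumed
		}
	helper.Close(); // resets the type to EAllocatorNotSet and frees the caches
	}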