main/al/newallocator.cpp
changeset 24 99ad1390cd33
parent 23 74c9f037fd5d
child 26 c499df2dbb33
     1 /*
       
     2 * Copyright (c) 1994-2001 Nokia Corporation and/or its subsidiary(-ies).
       
     3 * All rights reserved.
       
     4 * This component and the accompanying materials are made available
       
     5 * under the terms of "Eclipse Public License v1.0"
       
     6 * which accompanies this distribution, and is available
       
     7 * at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     8 *
       
     9 * Initial Contributors:
       
    10 * Nokia Corporation - initial contribution.
       
    11 *
       
    12 * Contributors:
       
    13 *
       
    14 * Description: 
       
    15 *
       
    16 */
       
    17 
       
    18 #include <e32std.h>
       
    19 #include <e32cmn.h>
       
    20 #include <hal.h>
       
    21 #include <e32panic.h>
       
    22 #include <u32std.h>
       
    23 #include <e32btrace.h>
       
    24 #include <e32svr.h>
       
    25 
       
    26 #ifndef __WINS__
       
    27 #pragma push
       
    28 #pragma arm
       
    29 #endif
       
    30 
       
    31 #include "DLA.h"
       
    32 #include "newallocator.h"
       
    33 
       
    34 #define ALLOCATOR_ADP75
       
    35 //#define TRACING_HEAPS
       
    36 //#define DEBUG_DEVLON70
       
    37 //#define ENABLE_BTRACE
       
    38 
       
     39 // If non-zero, the slabs are configured only once the chunk size exceeds this threshold
       
    40 #define DELAYED_SLAB_THRESHOLD (64*1024)		// 64KB seems about right based on trace data
       
    41 #define SLAB_CONFIG (0xabe)
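        // A sketch of how the delayed configuration plays out (see Init() below): while the
        // heap chunk is still smaller than DELAYED_SLAB_THRESHOLD, slab_init_threshold is set
        // to that threshold so the SLAB_CONFIG bitmap is only applied once the chunk grows
        // past 64KB; otherwise slab_config() is applied immediately and slab_init_threshold
        // is parked at KMaxTUint.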
       
    42 
       
    43 _LIT(KDLHeapPanicCategory, "DL Heap");
       
    44 #define	GET_PAGE_SIZE(x)			HAL::Get(HALData::EMemoryPageSize, x)
       
    45 #define	__CHECK_CELL(p)
       
    46 #define __POWER_OF_2(x)				((TUint32)((x)^((x)-1))>=(TUint32)(x))
       
    47 #define HEAP_PANIC(r)               Panic(r)
       
    48 
       
    49 LOCAL_C void Panic(TCdtPanic aPanic)
       
    50 // Panic the process with USER as the category.
       
    51 	{
       
    52 	User::Panic(_L("USER"),aPanic);
       
    53 	}
       
    54 
       
    55 
       
    56 #define gm  (&iGlobalMallocState)
       
    57 
       
    58 RNewAllocator::RNewAllocator(TInt aMaxLength, TInt aAlign, TBool aSingleThread)
       
    59 // constructor for a fixed heap. Just use DL allocator
       
    60 	:iMinLength(aMaxLength), iMaxLength(aMaxLength), iOffset(0), iGrowBy(0), iChunkHandle(0),
       
    61 	iNestingLevel(0), iAllocCount(0), iFailType(ENone), iTestData(NULL), iChunkSize(aMaxLength)
       
    62 	{
       
    63 
       
     64 	// Workaround so GKIServ (hudson generic low level layer) starts up OK: it passes an aAlign of 0, which would panic, so fall back to 4 for any invalid alignment
       
     65 	if ((TUint32)aAlign>=sizeof(TAny*) && __POWER_OF_2(aAlign))
       
    66 		{
       
    67 		iAlign = aAlign;
       
    68 		}
       
    69 	else
       
    70 		{
       
    71 		iAlign = 4;
       
    72 		}
       
    73 	iPageSize = 0;
       
    74 	iFlags = aSingleThread ? (ESingleThreaded|EFixedSize) : EFixedSize;
       
    75 
       
    76 	Init(0, 0, 0);
       
    77 	}
       
    78 #ifdef TRACING_HEAPS
       
    79 RNewAllocator::RNewAllocator(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy,
       
    80 			TInt aAlign, TBool aSingleThread)
       
    81 		: iMinLength(aMinLength), iMaxLength(aMaxLength), iOffset(aOffset), iChunkHandle(aChunkHandle), iNestingLevel(0), iAllocCount(0),
       
    82 			iAlign(aAlign),iFailType(ENone), iTestData(NULL), iChunkSize(aMinLength),iHighWaterMark(aMinLength)
       
    83 #else
       
    84 RNewAllocator::RNewAllocator(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy,
       
    85 			TInt aAlign, TBool aSingleThread)
       
    86 		: iMinLength(aMinLength), iMaxLength(aMaxLength), iOffset(aOffset), iChunkHandle(aChunkHandle), iNestingLevel(0), iAllocCount(0),
       
    87 			iAlign(aAlign),iFailType(ENone), iTestData(NULL), iChunkSize(aMinLength)
       
    88 #endif
       
    89 	{
       
     90 	// Page size is picked up from the OS via HAL (GET_PAGE_SIZE below)
       
    91 	GET_PAGE_SIZE(iPageSize);
       
    92 	__ASSERT_ALWAYS(aOffset >=0, User::Panic(KDLHeapPanicCategory, ETHeapNewBadOffset));
       
    93 	iGrowBy = _ALIGN_UP(aGrowBy, iPageSize);
       
    94 	iFlags = aSingleThread ? ESingleThreaded : 0;
       
    95 
       
    96 	// Initialise
       
    97 	// if the heap is created with aMinLength==aMaxLength then it cannot allocate slab or page memory
       
    98 	// so these sub-allocators should be disabled. Otherwise initialise with default values
       
    99 	if (aMinLength == aMaxLength)
       
   100 		Init(0, 0, 0);
       
   101 	else
       
   102 		Init(0xabe, 16, iPageSize*4);	// slabs {48, 40, 32, 24, 20, 16, 12, 8}, page {64KB}, trim {16KB}
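        	// Reading of the arguments above, based on the comments in this file: aBitmapSlab = 0xabe
        	// is the bit mask of enabled slab cell sizes, aPagePower = 16 routes allocations of
        	// 2^16 = 64KB and above to the paged allocator (see the 10-1K..16-64K table in Init()),
        	// and aTrimThreshold = iPageSize*4 is the 16KB trim threshold handed on to dlmalloc.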
       
   103 #ifdef TRACING_HEAPS
       
   104 	RChunk chunk;
       
   105 	chunk.SetHandle(iChunkHandle);
       
   106 	TKName chunk_name;
       
   107 	chunk.FullName(chunk_name);
       
   108 	BTraceContextBig(BTrace::ETest1, 2, 22, chunk_name.Ptr(), chunk_name.Size());
       
   109 
       
   110 	TUint32 traceData[4];
       
   111 	traceData[0] = iChunkHandle;
       
   112 	traceData[1] = iMinLength;
       
   113 	traceData[2] = iMaxLength;
       
   114 	traceData[3] = iAlign;
       
   115 	BTraceContextN(BTrace::ETest1, 1, (TUint32)this, 11, traceData, sizeof(traceData));
       
   116 #endif
       
   117 
       
   118 	}
       
   119 
       
   120 TAny* RNewAllocator::operator new(TUint aSize, TAny* aBase) __NO_THROW
       
   121 	{
       
   122 	__ASSERT_ALWAYS(aSize>=sizeof(RNewAllocator), HEAP_PANIC(ETHeapNewBadSize));
       
   123 	RNewAllocator* h = (RNewAllocator*)aBase;
       
   124 	h->iAlign = 0x80000000;	// garbage value
       
   125 	h->iBase = ((TUint8*)aBase) + aSize;
       
   126 	return aBase;
       
   127 	}
       
   128 
       
   129 void RNewAllocator::Init(TInt aBitmapSlab, TInt aPagePower, size_t aTrimThreshold)
       
   130 	{
       
   131 	__ASSERT_ALWAYS((TUint32)iAlign>=sizeof(TAny*) && __POWER_OF_2(iAlign), HEAP_PANIC(ETHeapNewBadAlignment));
       
   132 
       
    133 	/* Moved code that does the initialisation */
       
   134 	iTop = (TUint8*)this + iMinLength;
       
   135 	iAllocCount = 0;
       
   136 	memset(&mparams,0,sizeof(mparams));
       
   137 
       
   138 	Init_Dlmalloc(iTop - iBase, 0, aTrimThreshold);
       
   139 
       
   140 	slab_init();
       
   141 	slab_config_bits = aBitmapSlab;
       
   142 #ifdef DELAYED_SLAB_THRESHOLD
       
   143 	if (iChunkSize < DELAYED_SLAB_THRESHOLD)
       
   144 		{
       
   145 		slab_init_threshold = DELAYED_SLAB_THRESHOLD;
       
   146 		}
       
   147 	else
       
   148 #endif // DELAYED_SLAB_THRESHOLD
       
   149 		{
       
   150 		slab_init_threshold = KMaxTUint;
       
   151 		slab_config(aBitmapSlab);
       
   152 		}
       
   153 
       
    154 	/* aPagePower: 10 = 1 KB, 11 = 2 KB, 12 = 4 KB, 13 = 8 KB, 14 = 16 KB, 15 = 32 KB, 16 = 64 KB */
       
   155 	paged_init(aPagePower);
       
   156 
       
   157 #ifdef ENABLE_BTRACE
       
   158 		TUint32 traceData[3];
       
   159 		traceData[0] = aBitmapSlab;
       
   160 		traceData[1] = aPagePower;
       
   161 		traceData[2] = aTrimThreshold;
       
   162 		BTraceContextN(BTrace::ETest1, BTrace::EHeapAlloc, (TUint32)this, 0, traceData, sizeof(traceData));
       
   163 #endif
       
   164 
       
   165 	}
       
   166 
       
   167 RNewAllocator::SCell* RNewAllocator::GetAddress(const TAny* aCell) const
       
   168 //
       
   169 // As much as possible, check a cell address and backspace it
       
   170 // to point at the cell header.
       
   171 //
       
   172 	{
       
   173 
       
   174 	TLinAddr m = TLinAddr(iAlign - 1);
       
   175 	__ASSERT_ALWAYS(!(TLinAddr(aCell)&m), HEAP_PANIC(ETHeapBadCellAddress));
       
   176 
       
   177 	SCell* pC = (SCell*)(((TUint8*)aCell)-EAllocCellSize);
       
   178 	__CHECK_CELL(pC);
       
   179 
       
   180 	return pC;
       
   181 	}
       
   182 
       
   183 TInt RNewAllocator::AllocLen(const TAny* aCell) const
       
   184 {
       
   185 	if (ptrdiff(aCell, this) >= 0)
       
   186 	{
       
   187 		mchunkptr m = mem2chunk(aCell);
       
   188 		return chunksize(m) - overhead_for(m);
       
   189 	}
       
   190 	if (lowbits(aCell, pagesize) > cellalign)
       
   191 		return header_size(slab::slabfor(aCell)->header);
       
   192 	if (lowbits(aCell, pagesize) == cellalign)
       
   193 		return *(unsigned*)(offset(aCell,-int(cellalign)))-cellalign;
       
   194 	return paged_descriptor(aCell)->size;
       
   195 }
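        // AllocLen() above shows the three address zones this allocator distinguishes: cells at
        // or above the RNewAllocator object itself belong to the dlmalloc (DL) zone, cells whose
        // low bits within a page are at or below cellalign belong to the paged zone (the
        // == cellalign case keeps its size just in front of the cell), and everything else is a
        // slab cell. Free() and ReAlloc() below route pointers with the same tests.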
       
   196 
       
   197 TAny* RNewAllocator::Alloc(TInt aSize)
       
   198 {
       
   199 	__ASSERT_ALWAYS((TUint)aSize<(KMaxTInt/2),HEAP_PANIC(ETHeapBadAllocatedCellSize));
       
   200 
       
   201 	TAny* addr;
       
   202 
       
   203 #ifdef ENABLE_BTRACE
       
   204 	TInt aCnt=0;
       
   205 #endif
       
   206 	Lock();
       
   207 	if (aSize < slab_threshold)
       
   208 	{
       
   209 		TInt ix = sizemap[(aSize+3)>>2];
       
   210 		ASSERT(ix != 0xff);
       
   211 		addr = slab_allocate(slaballoc[ix]);
       
    212 	} else if ((aSize >> page_threshold) == 0)
       
   213 		{
       
   214 #ifdef ENABLE_BTRACE
       
   215 		aCnt=1;
       
   216 #endif
       
   217 		addr = dlmalloc(aSize);
       
   218 		}
       
   219 	else
       
   220 		{
       
   221 #ifdef ENABLE_BTRACE
       
   222 		aCnt=2;
       
   223 #endif
       
   224 		addr = paged_allocate(aSize);
       
   225 		}
       
   226 
       
   227 	iCellCount++;
       
   228 	iTotalAllocSize += aSize;
       
   229 	Unlock();
       
   230 
       
   231 #ifdef ENABLE_BTRACE
       
   232 	if (iFlags & ETraceAllocs)
       
   233 		{
       
   234 		TUint32 traceData[3];
       
   235 		traceData[0] = AllocLen(addr);
       
   236 		traceData[1] = aSize;
       
   237 		traceData[2] = aCnt;
       
   238 		BTraceContextN(BTrace::EHeap, BTrace::EHeapAlloc, (TUint32)this, (TUint32)addr, traceData, sizeof(traceData));
       
   239 		}
       
   240 #endif
       
   241 
       
   242 #ifdef DEBUG_DEVLON70
       
   243 	if(!addr)
       
   244 		{
       
   245 		TUint32 traceD[5];
       
   246 		traceD[0] = 1;
       
   247 		traceD[1] = aSize;
       
   248 		traceD[2] = iMaxLength;
       
   249 		traceD[3] = iChunkSize;
       
   250 		traceD[4] = (TUint32)addr;
       
   251 		BTraceContextN(BTrace::ETest2, 2, (TUint32)this, 2, traceD, sizeof(traceD));
       
   252 		}
       
   253 #endif
       
   254 
       
   255 	return addr;
       
   256 }
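        // Size-class routing used by Alloc() above, with the thresholds named in this file:
        //   aSize <  slab_threshold           -> slab_allocate()    (small cells)
        //   (aSize >> page_threshold) == 0    -> dlmalloc()         (medium cells)
        //   otherwise                         -> paged_allocate()   (large cells)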
       
   257 
       
   258 TInt RNewAllocator::Compress()
       
   259 	{
       
   260 	if (iFlags & EFixedSize)
       
   261 		return 0;
       
   262 
       
   263 	Lock();
       
   264 	dlmalloc_trim(0);
       
   265 	if (spare_page)
       
   266 		{
       
   267 		unmap(spare_page,pagesize);
       
   268 		spare_page = 0;
       
   269 		}
       
   270 	Unlock();
       
   271 	return 0;
       
   272 	}
       
   273 
       
   274 void RNewAllocator::Free(TAny* aPtr)
       
   275 {
       
   276 
       
   277 #ifdef ENABLE_BTRACE
       
   278 	TInt aCnt=0;
       
   279 #endif
       
   280 #ifdef ENABLE_DEBUG_TRACE
       
   281 	RThread me;
       
   282 	TBuf<100> thName;
       
   283 	me.FullName(thName);
       
   284 #endif
       
   285     //if (!aPtr) return; //return in case of NULL pointer
       
   286 
       
   287 	Lock();
       
   288 
       
   289 	if (!aPtr)
       
   290 		;
       
   291 	else if (ptrdiff(aPtr, this) >= 0)
       
   292 		{
       
   293 #ifdef ENABLE_BTRACE
       
   294 		aCnt = 1;
       
   295 #endif
       
   296 		dlfree( aPtr);
       
   297 		}
       
   298 	else if (lowbits(aPtr, pagesize) <= cellalign)
       
   299 		{
       
   300 #ifdef ENABLE_BTRACE
       
   301 		aCnt = 2;
       
   302 #endif
       
   303 		paged_free(aPtr);
       
   304 		}
       
   305 	else
       
   306 		{
       
   307 #ifdef ENABLE_BTRACE
       
   308 		aCnt = 0;
       
   309 #endif
       
   310 		slab_free(aPtr);
       
   311 		}
       
   312 	iCellCount--;
       
   313 	Unlock();
       
   314 
       
   315 #ifdef ENABLE_BTRACE
       
   316 	if (iFlags & ETraceAllocs)
       
   317 		{
       
   318 		TUint32 traceData;
       
   319 		traceData = aCnt;
       
   320 		BTraceContextN(BTrace::EHeap, BTrace::EHeapFree, (TUint32)this, (TUint32)aPtr, &traceData, sizeof(traceData));
       
   321 		}
       
   322 #endif
       
   323 }
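        // Free() mirrors that routing by inspecting the pointer rather than a size: NULL is a
        // no-op, addresses at or above the heap object go back to dlfree(), near-page-aligned
        // addresses (lowbits <= cellalign) go to paged_free(), and anything else is returned to
        // its slab via slab_free().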
       
   324 
       
   325 
       
   326 void RNewAllocator::Reset()
       
   327 	{
       
   328 	// TODO free everything
       
   329 	}
       
   330 
       
   331 #ifdef ENABLE_BTRACE
       
   332 TAny* RNewAllocator::DLReAllocImpl(TAny* aPtr, TInt aSize)
       
   333 	{
       
   334 	if(ptrdiff(aPtr,this)>=0)
       
   335 	{
       
   336 		// original cell is in DL zone
       
   337 		if(aSize >= slab_threshold && (aSize>>page_threshold)==0)
       
   338 			{
       
   339 			// and so is the new one
       
   340 			Lock();
       
   341 			TAny* addr = dlrealloc(aPtr,aSize);
       
   342 			Unlock();
       
   343 #ifdef DEBUG_DEVLON70
       
   344 			if(!addr)
       
   345 				{
       
   346 				TUint32 traceD[5];
       
   347 				traceD[0] = 15;
       
   348 				traceD[1] = aSize;
       
   349 				traceD[2] = iMaxLength;
       
   350 				traceD[3] = iChunkSize;
       
   351 				traceD[4] = (TUint32)addr;
       
   352 				BTraceContextN(BTrace::ETest2, 33, (TUint32)this, 10, traceD, sizeof(traceD));
       
   353 				}
       
   354 #endif
       
   355 			return addr;
       
   356 			}
       
   357 	}
       
   358 	else if(lowbits(aPtr,pagesize)<=cellalign)
       
   359 	{
       
   360 		// original cell is either NULL or in paged zone
       
   361 		if (!aPtr)
       
   362 			return Alloc(aSize);
       
   363 		if(aSize >> page_threshold)
       
   364 			{
       
   365 			// and so is the new one
       
   366 			Lock();
       
   367 			TAny* addr = paged_reallocate(aPtr,aSize);
       
   368 			Unlock();
       
   369 #ifdef DEBUG_DEVLON70
       
   370 			if(!addr)
       
   371 				{
       
   372 				TUint32 traceD[5];
       
   373 				traceD[0] = 15;
       
   374 				traceD[1] = aSize;
       
   375 				traceD[2] = iMaxLength;
       
   376 				traceD[3] = iChunkSize;
       
   377 				traceD[4] = (TUint32)addr;
       
   378 				BTraceContextN(BTrace::ETest2, 33, (TUint32)this, 11, traceD, sizeof(traceD));
       
   379 				}
       
   380 #endif
       
   381 			return addr;
       
   382 			}
       
   383 	}
       
   384 	else
       
   385 	{
       
    386 		// original cell is in slab zone
       
   387 		if(aSize <= header_size(slab::slabfor(aPtr)->header))
       
   388 			return aPtr;
       
   389 	}
       
   390 	TAny* newp = Alloc(aSize);
       
   391 	if(newp)
       
   392 	{
       
   393 		TInt oldsize = AllocLen(aPtr);
       
   394 		memcpy(newp,aPtr,oldsize<aSize?oldsize:aSize);
       
   395 		Free(aPtr);
       
   396 	}
       
   397 	return newp;
       
   398 
       
   399 	}
       
   400 #endif
       
   401 TAny* RNewAllocator::ReAlloc(TAny* aPtr, TInt aSize, TInt /*aMode = 0*/)
       
   402 	{
       
   403 #ifdef  ENABLE_BTRACE
       
   404 	TAny* retval = DLReAllocImpl(aPtr,aSize);
       
   405 
       
    406 
       
   407 	if (retval && (iFlags & ETraceAllocs))
       
   408 		{
       
   409 		TUint32 traceData[3];
       
   410 		traceData[0] = AllocLen(retval);
       
   411 		traceData[1] = aSize;
       
   412 		traceData[2] = (TUint32)aPtr;
       
   413 		BTraceContextN(BTrace::EHeap, BTrace::EHeapReAlloc,(TUint32)this, (TUint32)retval,traceData, sizeof(traceData));
       
   414 		}
       
    415 
       
   416 	return retval;
       
   417 #else
       
   418 	if(ptrdiff(aPtr,this)>=0)
       
   419 	{
       
   420 		// original cell is in DL zone
       
   421 		if(aSize >= slab_threshold && (aSize>>page_threshold)==0)
       
   422 			{
       
   423 			// and so is the new one
       
   424 			Lock();
       
   425 			TAny* addr = dlrealloc(aPtr,aSize);
       
   426 			Unlock();
       
   427 			return addr;
       
   428 			}
       
   429 	}
       
   430 	else if(lowbits(aPtr,pagesize)<=cellalign)
       
   431 	{
       
   432 		// original cell is either NULL or in paged zone
       
   433 		if (!aPtr)
       
   434 			return Alloc(aSize);
       
   435 		if(aSize >> page_threshold)
       
   436 			{
       
   437 			// and so is the new one
       
   438 			Lock();
       
   439 			TAny* addr = paged_reallocate(aPtr,aSize);
       
   440 			Unlock();
       
   441 			return addr;
       
   442 			}
       
   443 	}
       
   444 	else
       
   445 	{
       
    446 		// original cell is in slab zone
       
   447 		if(aSize <= header_size(slab::slabfor(aPtr)->header))
       
   448 			return aPtr;
       
   449 	}
       
   450 	TAny* newp = Alloc(aSize);
       
   451 	if(newp)
       
   452 	{
       
   453 		TInt oldsize = AllocLen(aPtr);
       
   454 		memcpy(newp,aPtr,oldsize<aSize?oldsize:aSize);
       
   455 		Free(aPtr);
       
   456 	}
       
   457 	return newp;
       
   458 #endif
       
   459 	}
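        	// ReAlloc() policy as implemented above: a cell is resized via its own sub-allocator
        	// (dlrealloc, paged_reallocate, or reuse of the existing slab cell) only when the old
        	// and new sizes fall in the same zone; every other case falls back to
        	// Alloc() + memcpy() + Free(), copying min(old size, new size) bytes.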
       
   460 
       
   461 TInt RNewAllocator::Available(TInt& aBiggestBlock) const
       
   462 {
       
   463 	aBiggestBlock = 0;
       
   464 	return 1000;
       
    465 	/* TODO: decide how to implement this; the value returned above is a placeholder */
       
   466 	// TODO: return iHeap.Available(aBiggestBlock);
       
   467 }
       
   468 TInt RNewAllocator::AllocSize(TInt& aTotalAllocSize) const
       
   469 {
       
   470 	aTotalAllocSize = iTotalAllocSize;
       
   471 //	aTotalAllocSize = iChunkSize;
       
   472 	return iCellCount;
       
   473 }
       
   474 
       
   475 TInt RNewAllocator::DebugFunction(TInt /*aFunc*/, TAny* /*a1*/, TAny* /*a2*/)
       
   476 	{
       
   477 	return 0;
       
   478 	}
       
   479 TInt RNewAllocator::Extension_(TUint /* aExtensionId */, TAny*& /* a0 */, TAny* /* a1 */)
       
   480 	{
       
   481 	return KErrNotSupported;
       
   482 	}
       
   483 
       
    484 long sysconf(int size)
       
   485 	{
       
   486 	if (GET_PAGE_SIZE(size)!=KErrNone)
       
   487 		size = 0x1000;
       
   488 	return size;
       
   489 	}
       
   490 
       
   491 
       
   492 //
       
   493 // imported from dla.cpp
       
   494 //
       
   495 
       
   496 //#include <unistd.h>
       
   497 //#define DEBUG_REALLOC
       
   498 #ifdef DEBUG_REALLOC
       
   499 #include <e32debug.h>
       
   500 #endif
       
   501 inline int RNewAllocator::init_mparams(size_t aTrimThreshold /*= DEFAULT_TRIM_THRESHOLD*/)
       
   502 {
       
   503 	if (mparams.page_size == 0)
       
   504 	{
       
   505 		size_t s;
       
   506 		mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
       
   507 		mparams.trim_threshold = aTrimThreshold;
       
   508 		#if MORECORE_CONTIGUOUS
       
   509 			mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
       
   510 		#else  /* MORECORE_CONTIGUOUS */
       
   511 			mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
       
   512 		#endif /* MORECORE_CONTIGUOUS */
       
   513 
       
   514 			s = (size_t)0x58585858U;
       
   515 		ACQUIRE_MAGIC_INIT_LOCK(&mparams);
       
   516 		if (mparams.magic == 0) {
       
   517 		  mparams.magic = s;
       
   518 		  /* Set up lock for main malloc area */
       
   519 		  INITIAL_LOCK(&gm->mutex);
       
   520 		  gm->mflags = mparams.default_mflags;
       
   521 		}
       
   522 		RELEASE_MAGIC_INIT_LOCK(&mparams);
       
   523 
       
    524 		// DAN: replaced malloc_getpagesize with the HAL page-size query below
       
   525 		// mparams.page_size = malloc_getpagesize;
       
   526 		int temp = 0;
       
   527 		GET_PAGE_SIZE(temp);
       
   528 		mparams.page_size = temp;
       
   529 
       
   530 		mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
       
   531 							   DEFAULT_GRANULARITY : mparams.page_size);
       
   532 
       
   533 		/* Sanity-check configuration:
       
   534 		   size_t must be unsigned and as wide as pointer type.
       
   535 		   ints must be at least 4 bytes.
       
   536 		   alignment must be at least 8.
       
   537 		   Alignment, min chunk size, and page size must all be powers of 2.
       
   538 		*/
       
   539 
       
   540 		if ((sizeof(size_t) != sizeof(TUint8*)) ||
       
   541 			(MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
       
   542 			(sizeof(int) < 4)  ||
       
   543 			(MALLOC_ALIGNMENT < (size_t)8U) ||
       
   544 			((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
       
   545 			((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
       
   546 			((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
       
   547 			((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
       
   548 		  ABORT;
       
   549 	}
       
   550 	return 0;
       
   551 }
       
   552 
       
   553 inline void RNewAllocator::init_bins(mstate m) {
       
   554   /* Establish circular links for smallbins */
       
   555   bindex_t i;
       
   556   for (i = 0; i < NSMALLBINS; ++i) {
       
   557     sbinptr bin = smallbin_at(m,i);
       
   558     bin->fd = bin->bk = bin;
       
   559   }
       
   560 }
       
   561 /* ---------------------------- malloc support --------------------------- */
       
   562 
       
   563 /* allocate a large request from the best fitting chunk in a treebin */
       
   564 void* RNewAllocator::tmalloc_large(mstate m, size_t nb) {
       
   565   tchunkptr v = 0;
       
   566   size_t rsize = -nb; /* Unsigned negation */
       
   567   tchunkptr t;
       
   568   bindex_t idx;
       
   569   compute_tree_index(nb, idx);
       
   570 
       
   571   if ((t = *treebin_at(m, idx)) != 0) {
       
   572     /* Traverse tree for this bin looking for node with size == nb */
       
   573     size_t sizebits =
       
   574     nb <<
       
   575     leftshift_for_tree_index(idx);
       
   576     tchunkptr rst = 0;  /* The deepest untaken right subtree */
       
   577     for (;;) {
       
   578       tchunkptr rt;
       
   579       size_t trem = chunksize(t) - nb;
       
   580       if (trem < rsize) {
       
   581         v = t;
       
   582         if ((rsize = trem) == 0)
       
   583           break;
       
   584       }
       
   585       rt = t->child[1];
       
   586       t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
       
   587       if (rt != 0 && rt != t)
       
   588         rst = rt;
       
   589       if (t == 0) {
       
   590         t = rst; /* set t to least subtree holding sizes > nb */
       
   591         break;
       
   592       }
       
   593       sizebits <<= 1;
       
   594     }
       
   595   }
       
   596   if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
       
   597     binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
       
   598     if (leftbits != 0) {
       
   599       bindex_t i;
       
   600       binmap_t leastbit = least_bit(leftbits);
       
   601       compute_bit2idx(leastbit, i);
       
   602       t = *treebin_at(m, i);
       
   603     }
       
   604   }
       
   605   while (t != 0) { /* find smallest of tree or subtree */
       
   606     size_t trem = chunksize(t) - nb;
       
   607     if (trem < rsize) {
       
   608       rsize = trem;
       
   609       v = t;
       
   610     }
       
   611     t = leftmost_child(t);
       
   612   }
       
   613   /*  If dv is a better fit, return 0 so malloc will use it */
       
   614   if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
       
   615     if (RTCHECK(ok_address(m, v))) { /* split */
       
   616       mchunkptr r = chunk_plus_offset(v, nb);
       
   617       assert(chunksize(v) == rsize + nb);
       
   618       if (RTCHECK(ok_next(v, r))) {
       
   619         unlink_large_chunk(m, v);
       
   620         if (rsize < MIN_CHUNK_SIZE)
       
   621           set_inuse_and_pinuse(m, v, (rsize + nb));
       
   622         else {
       
   623           set_size_and_pinuse_of_inuse_chunk(m, v, nb);
       
   624           set_size_and_pinuse_of_free_chunk(r, rsize);
       
   625           insert_chunk(m, r, rsize);
       
   626         }
       
   627         return chunk2mem(v);
       
   628       }
       
   629     }
       
   630     CORRUPTION_ERROR_ACTION(m);
       
   631   }
       
   632   return 0;
       
   633 }
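        /* Summary of the search above: walk down the treebin indexed for nb, tracking the best
           (smallest adequate) chunk and the deepest untaken right subtree; if that bin has
           nothing suitable, take the smallest chunk of the next non-empty treebin holding larger
           sizes; finally the candidate is used only if it beats the designated victim (dv),
           otherwise 0 is returned so malloc falls back to dv. */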
       
   634 
       
   635 /* allocate a small request from the best fitting chunk in a treebin */
       
   636 void* RNewAllocator::tmalloc_small(mstate m, size_t nb) {
       
   637   tchunkptr t, v;
       
   638   size_t rsize;
       
   639   bindex_t i;
       
   640   binmap_t leastbit = least_bit(m->treemap);
       
   641   compute_bit2idx(leastbit, i);
       
   642 
       
   643   v = t = *treebin_at(m, i);
       
   644   rsize = chunksize(t) - nb;
       
   645 
       
   646   while ((t = leftmost_child(t)) != 0) {
       
   647     size_t trem = chunksize(t) - nb;
       
   648     if (trem < rsize) {
       
   649       rsize = trem;
       
   650       v = t;
       
   651     }
       
   652   }
       
   653 
       
   654   if (RTCHECK(ok_address(m, v))) {
       
   655     mchunkptr r = chunk_plus_offset(v, nb);
       
   656     assert(chunksize(v) == rsize + nb);
       
   657     if (RTCHECK(ok_next(v, r))) {
       
   658       unlink_large_chunk(m, v);
       
   659       if (rsize < MIN_CHUNK_SIZE)
       
   660         set_inuse_and_pinuse(m, v, (rsize + nb));
       
   661       else {
       
   662         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
       
   663         set_size_and_pinuse_of_free_chunk(r, rsize);
       
   664         replace_dv(m, r, rsize);
       
   665       }
       
   666       return chunk2mem(v);
       
   667     }
       
   668   }
       
   669   CORRUPTION_ERROR_ACTION(m);
       
   670   return 0;
       
   671 }
       
   672 
       
   673 inline void RNewAllocator::init_top(mstate m, mchunkptr p, size_t psize)
       
   674 {
       
   675 	/* Ensure alignment */
       
   676 	size_t offset = align_offset(chunk2mem(p));
       
   677 	p = (mchunkptr)((TUint8*)p + offset);
       
   678 	psize -= offset;
       
   679 	m->top = p;
       
   680 	m->topsize = psize;
       
   681 	p->head = psize | PINUSE_BIT;
       
   682 	/* set size of fake trailing chunk holding overhead space only once */
       
   683 	mchunkptr chunkPlusOff = chunk_plus_offset(p, psize);
       
   684 	chunkPlusOff->head = TOP_FOOT_SIZE;
       
   685 	m->trim_check = mparams.trim_threshold; /* reset on each update */
       
   686 }
       
   687 
       
   688 void* RNewAllocator::internal_realloc(mstate m, void* oldmem, size_t bytes)
       
   689 {
       
   690   if (bytes >= MAX_REQUEST) {
       
   691     MALLOC_FAILURE_ACTION;
       
   692     return 0;
       
   693   }
       
   694   if (!PREACTION(m)) {
       
   695     mchunkptr oldp = mem2chunk(oldmem);
       
   696     size_t oldsize = chunksize(oldp);
       
   697     mchunkptr next = chunk_plus_offset(oldp, oldsize);
       
   698     mchunkptr newp = 0;
       
   699     void* extra = 0;
       
   700 
       
   701     /* Try to either shrink or extend into top. Else malloc-copy-free */
       
   702 
       
   703     if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
       
   704                 ok_next(oldp, next) && ok_pinuse(next))) {
       
   705       size_t nb = request2size(bytes);
       
   706       if (is_mmapped(oldp))
       
   707         newp = mmap_resize(m, oldp, nb);
       
   708       else
       
   709 	  if (oldsize >= nb) { /* already big enough */
       
   710         size_t rsize = oldsize - nb;
       
   711         newp = oldp;
       
   712         if (rsize >= MIN_CHUNK_SIZE) {
       
   713           mchunkptr remainder = chunk_plus_offset(newp, nb);
       
   714           set_inuse(m, newp, nb);
       
   715           set_inuse(m, remainder, rsize);
       
   716           extra = chunk2mem(remainder);
       
   717         }
       
   718       }
       
    719 		/*AMOD: modified for optimisation*/
       
   720 		else if (next == m->top && oldsize + m->topsize > nb)
       
   721 		{
       
   722 			/* Expand into top */
       
   723 			if(oldsize + m->topsize > nb)
       
   724 			{
       
   725 		        size_t newsize = oldsize + m->topsize;
       
   726 		        size_t newtopsize = newsize - nb;
       
   727 		        mchunkptr newtop = chunk_plus_offset(oldp, nb);
       
   728 		        set_inuse(m, oldp, nb);
       
   729 		        newtop->head = newtopsize |PINUSE_BIT;
       
   730 		        m->top = newtop;
       
   731 		        m->topsize = newtopsize;
       
   732 		        newp = oldp;
       
   733 			}
       
   734       }
       
   735     }
       
   736     else {
       
   737       USAGE_ERROR_ACTION(m, oldmem);
       
   738       POSTACTION(m);
       
   739       return 0;
       
   740     }
       
   741 
       
   742     POSTACTION(m);
       
   743 
       
   744     if (newp != 0) {
       
   745       if (extra != 0) {
       
   746         internal_free(m, extra);
       
   747       }
       
   748       check_inuse_chunk(m, newp);
       
   749       return chunk2mem(newp);
       
   750     }
       
   751     else {
       
   752       void* newmem = internal_malloc(m, bytes);
       
   753       if (newmem != 0) {
       
   754         size_t oc = oldsize - overhead_for(oldp);
       
   755         memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
       
   756         internal_free(m, oldmem);
       
   757       }
       
   758       return newmem;
       
   759     }
       
   760   }
       
   761   return 0;
       
   762 }
       
   763 /* ----------------------------- statistics ------------------------------ */
       
   764 mallinfo RNewAllocator::internal_mallinfo(mstate m) {
       
   765   struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
       
   766   TInt chunkCnt = 0;
       
   767   if (!PREACTION(m)) {
       
   768     check_malloc_state(m);
       
   769     if (is_initialized(m)) {
       
   770       size_t nfree = SIZE_T_ONE; /* top always free */
       
   771       size_t mfree = m->topsize + TOP_FOOT_SIZE;
       
   772       size_t sum = mfree;
       
   773       msegmentptr s = &m->seg;
       
   774       TInt tmp = (TUint8*)m->top - (TUint8*)s->base;
       
   775       while (s != 0) {
       
   776         mchunkptr q = align_as_chunk(s->base);
       
   777         chunkCnt++;
       
   778         while (segment_holds(s, q) &&
       
   779                q != m->top && q->head != FENCEPOST_HEAD) {
       
   780           size_t sz = chunksize(q);
       
   781           sum += sz;
       
   782           if (!cinuse(q)) {
       
   783             mfree += sz;
       
   784             ++nfree;
       
   785           }
       
   786           q = next_chunk(q);
       
   787         }
       
   788         s = s->next;
       
   789       }
       
   790       nm.arena    = sum;
       
   791       nm.ordblks  = nfree;
       
   792       nm.hblkhd   = m->footprint - sum;
       
   793       nm.usmblks  = m->max_footprint;
       
   794       nm.uordblks = m->footprint - mfree;
       
   795       nm.fordblks = mfree;
       
   796       nm.keepcost = m->topsize;
       
   797       nm.cellCount= chunkCnt;/*number of chunks allocated*/
       
   798     }
       
   799     POSTACTION(m);
       
   800   }
       
   801   return nm;
       
   802 }
       
   803 
       
   804 void  RNewAllocator::internal_malloc_stats(mstate m) {
       
   805 if (!PREACTION(m)) {
       
   806   size_t maxfp = 0;
       
   807   size_t fp = 0;
       
   808   size_t used = 0;
       
   809   check_malloc_state(m);
       
   810   if (is_initialized(m)) {
       
   811     msegmentptr s = &m->seg;
       
   812     maxfp = m->max_footprint;
       
   813     fp = m->footprint;
       
   814     used = fp - (m->topsize + TOP_FOOT_SIZE);
       
   815 
       
   816     while (s != 0) {
       
   817       mchunkptr q = align_as_chunk(s->base);
       
   818       while (segment_holds(s, q) &&
       
   819              q != m->top && q->head != FENCEPOST_HEAD) {
       
   820         if (!cinuse(q))
       
   821           used -= chunksize(q);
       
   822         q = next_chunk(q);
       
   823       }
       
   824       s = s->next;
       
   825     }
       
   826   }
       
   827   POSTACTION(m);
       
   828 }
       
   829 }
       
   830 /* support for mallopt */
       
   831 int RNewAllocator::change_mparam(int param_number, int value) {
       
   832   size_t val = (size_t)value;
       
   833   init_mparams(DEFAULT_TRIM_THRESHOLD);
       
   834   switch(param_number) {
       
   835   case M_TRIM_THRESHOLD:
       
   836     mparams.trim_threshold = val;
       
   837     return 1;
       
   838   case M_GRANULARITY:
       
   839     if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
       
   840       mparams.granularity = val;
       
   841       return 1;
       
   842     }
       
   843     else
       
   844       return 0;
       
   845   case M_MMAP_THRESHOLD:
       
   846     mparams.mmap_threshold = val;
       
   847     return 1;
       
   848   default:
       
   849     return 0;
       
   850   }
       
   851 }
       
   852 /* Get memory from system using MORECORE or MMAP */
       
   853 void* RNewAllocator::sys_alloc(mstate m, size_t nb)
       
   854 {
       
   855 	TUint8* tbase = CMFAIL;
       
   856 	size_t tsize = 0;
       
   857 	flag_t mmap_flag = 0;
       
   858 	//init_mparams();/*No need to do init_params here*/
       
   859 	/* Directly map large chunks */
       
   860 	if (use_mmap(m) && nb >= mparams.mmap_threshold)
       
   861 	{
       
   862 		void* mem = mmap_alloc(m, nb);
       
   863 		if (mem != 0)
       
   864 			return mem;
       
   865 	}
       
   866   /*
       
   867     Try getting memory in any of three ways (in most-preferred to
       
   868     least-preferred order):
       
   869     1. A call to MORECORE that can normally contiguously extend memory.
       
   870        (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
       
   871        or main space is mmapped or a previous contiguous call failed)
       
   872     2. A call to MMAP new space (disabled if not HAVE_MMAP).
       
   873        Note that under the default settings, if MORECORE is unable to
       
   874        fulfill a request, and HAVE_MMAP is true, then mmap is
       
   875        used as a noncontiguous system allocator. This is a useful backup
       
   876        strategy for systems with holes in address spaces -- in this case
       
   877        sbrk cannot contiguously expand the heap, but mmap may be able to
       
   878        find space.
       
   879     3. A call to MORECORE that cannot usually contiguously extend memory.
       
   880        (disabled if not HAVE_MORECORE)
       
   881   */
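          /* The code below tries these in the same order: the MORECORE_CONTIGUOUS block, then
             the HAVE_MMAP block, then the noncontiguous MORECORE block, stopping as soon as
             tbase has been obtained. */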
       
    882   /* Try to allocate the memory */
       
   883 	if(MORECORE_CONTIGUOUS && !use_noncontiguous(m))
       
   884 	{
       
   885 	TUint8* br = CMFAIL;
       
   886     msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (TUint8*)m->top);
       
   887     size_t asize = 0;
       
   888     ACQUIRE_MORECORE_LOCK(m);
       
   889     if (ss == 0)
       
   890 	{  /* First time through or recovery */
       
   891 		TUint8* base = (TUint8*)CALL_MORECORE(0);
       
   892 		if (base != CMFAIL)
       
   893 		{
       
   894 			asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
       
   895 			/* Adjust to end on a page boundary */
       
   896 			if (!is_page_aligned(base))
       
   897 				asize += (page_align((size_t)base) - (size_t)base);
       
   898 			/* Can't call MORECORE if size is negative when treated as signed */
       
   899 			if (asize < HALF_MAX_SIZE_T &&(br = (TUint8*)(CALL_MORECORE(asize))) == base)
       
   900 			{
       
   901 				tbase = base;
       
   902 				tsize = asize;
       
   903 			}
       
   904 		}
       
   905     }
       
   906     else
       
   907 	{
       
   908       /* Subtract out existing available top space from MORECORE request. */
       
   909 		asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
       
   910     /* Use mem here only if it did continuously extend old space */
       
   911       if (asize < HALF_MAX_SIZE_T &&
       
   912           (br = (TUint8*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
       
   913         tbase = br;
       
   914         tsize = asize;
       
   915       }
       
   916     }
       
   917     if (tbase == CMFAIL) {    /* Cope with partial failure */
       
   918       if (br != CMFAIL) {    /* Try to use/extend the space we did get */
       
   919         if (asize < HALF_MAX_SIZE_T &&
       
   920             asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
       
   921           size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
       
   922           if (esize < HALF_MAX_SIZE_T) {
       
   923             TUint8* end = (TUint8*)CALL_MORECORE(esize);
       
   924             if (end != CMFAIL)
       
   925               asize += esize;
       
   926             else {            /* Can't use; try to release */
       
   927               CALL_MORECORE(-asize);
       
   928               br = CMFAIL;
       
   929             }
       
   930           }
       
   931         }
       
   932       }
       
   933       if (br != CMFAIL) {    /* Use the space we did get */
       
   934         tbase = br;
       
   935         tsize = asize;
       
   936       }
       
   937       else
       
   938         disable_contiguous(m); /* Don't try contiguous path in the future */
       
   939     }
       
   940     RELEASE_MORECORE_LOCK(m);
       
   941   }
       
   942   if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
       
   943     size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
       
   944     size_t rsize = granularity_align(req);
       
   945     if (rsize > nb) { /* Fail if wraps around zero */
       
   946       TUint8* mp = (TUint8*)(CALL_MMAP(rsize));
       
   947       if (mp != CMFAIL) {
       
   948         tbase = mp;
       
   949         tsize = rsize;
       
   950         mmap_flag = IS_MMAPPED_BIT;
       
   951       }
       
   952     }
       
   953   }
       
   954   if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
       
   955     size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
       
   956     if (asize < HALF_MAX_SIZE_T) {
       
   957       TUint8* br = CMFAIL;
       
   958       TUint8* end = CMFAIL;
       
   959       ACQUIRE_MORECORE_LOCK(m);
       
   960       br = (TUint8*)(CALL_MORECORE(asize));
       
   961       end = (TUint8*)(CALL_MORECORE(0));
       
   962       RELEASE_MORECORE_LOCK(m);
       
   963       if (br != CMFAIL && end != CMFAIL && br < end) {
       
   964         size_t ssize = end - br;
       
   965         if (ssize > nb + TOP_FOOT_SIZE) {
       
   966           tbase = br;
       
   967           tsize = ssize;
       
   968         }
       
   969       }
       
   970     }
       
   971   }
       
   972   if (tbase != CMFAIL) {
       
   973     if ((m->footprint += tsize) > m->max_footprint)
       
   974       m->max_footprint = m->footprint;
       
   975     if (!is_initialized(m)) { /* first-time initialization */
       
   976       m->seg.base = m->least_addr = tbase;
       
   977       m->seg.size = tsize;
       
   978       m->seg.sflags = mmap_flag;
       
   979       m->magic = mparams.magic;
       
   980       init_bins(m);
       
   981       if (is_global(m))
       
   982         init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
       
   983       else {
       
   984         /* Offset top by embedded malloc_state */
       
   985         mchunkptr mn = next_chunk(mem2chunk(m));
       
   986         init_top(m, mn, (size_t)((tbase + tsize) - (TUint8*)mn) -TOP_FOOT_SIZE);
       
   987       }
       
   988     }else {
       
   989       /* Try to merge with an existing segment */
       
   990       msegmentptr sp = &m->seg;
       
   991       while (sp != 0 && tbase != sp->base + sp->size)
       
   992         sp = sp->next;
       
   993       if (sp != 0 && !is_extern_segment(sp) &&
       
   994           (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
       
   995           segment_holds(sp, m->top))
       
   996     	  { /* append */
       
   997         sp->size += tsize;
       
   998         init_top(m, m->top, m->topsize + tsize);
       
   999       }
       
  1000       else {
       
  1001         if (tbase < m->least_addr)
       
  1002           m->least_addr = tbase;
       
  1003         sp = &m->seg;
       
  1004         while (sp != 0 && sp->base != tbase + tsize)
       
  1005           sp = sp->next;
       
  1006         if (sp != 0 &&
       
  1007             !is_extern_segment(sp) &&
       
  1008             (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
       
  1009           TUint8* oldbase = sp->base;
       
  1010           sp->base = tbase;
       
  1011           sp->size += tsize;
       
  1012           return prepend_alloc(m, tbase, oldbase, nb);
       
  1013         }
       
  1014         else
       
  1015           add_segment(m, tbase, tsize, mmap_flag);
       
  1016       }
       
  1017     }
       
  1018     if (nb < m->topsize) { /* Allocate from new or extended top space */
       
  1019       size_t rsize = m->topsize -= nb;
       
  1020       mchunkptr p = m->top;
       
  1021       mchunkptr r = m->top = chunk_plus_offset(p, nb);
       
  1022       r->head = rsize | PINUSE_BIT;
       
  1023       set_size_and_pinuse_of_inuse_chunk(m, p, nb);
       
  1024       check_top_chunk(m, m->top);
       
  1025       check_malloced_chunk(m, chunk2mem(p), nb);
       
  1026       return chunk2mem(p);
       
  1027     }
       
  1028   }
       
  1029   /*need to check this*/
       
  1030   //errno = -1;
       
  1031   return 0;
       
  1032 }
       
  1033 msegmentptr RNewAllocator::segment_holding(mstate m, TUint8* addr) {
       
  1034   msegmentptr sp = &m->seg;
       
  1035   for (;;) {
       
  1036     if (addr >= sp->base && addr < sp->base + sp->size)
       
  1037       return sp;
       
  1038     if ((sp = sp->next) == 0)
       
  1039       return 0;
       
  1040   }
       
  1041 }
       
  1042 /* Unlink the first chunk from a smallbin */
       
  1043 inline void RNewAllocator::unlink_first_small_chunk(mstate M,mchunkptr B,mchunkptr P,bindex_t& I)
       
  1044 {
       
  1045   mchunkptr F = P->fd;
       
  1046   assert(P != B);
       
  1047   assert(P != F);
       
  1048   assert(chunksize(P) == small_index2size(I));
       
  1049   if (B == F)
       
  1050     clear_smallmap(M, I);
       
  1051   else if (RTCHECK(ok_address(M, F))) {
       
  1052     B->fd = F;
       
  1053     F->bk = B;
       
  1054   }
       
  1055   else {
       
  1056     CORRUPTION_ERROR_ACTION(M);
       
  1057   }
       
  1058 }
       
  1059 /* Link a free chunk into a smallbin  */
       
  1060 inline void RNewAllocator::insert_small_chunk(mstate M,mchunkptr P, size_t S)
       
  1061 {
       
  1062   bindex_t I  = small_index(S);
       
  1063   mchunkptr B = smallbin_at(M, I);
       
  1064   mchunkptr F = B;
       
  1065   assert(S >= MIN_CHUNK_SIZE);
       
  1066   if (!smallmap_is_marked(M, I))
       
  1067     mark_smallmap(M, I);
       
  1068   else if (RTCHECK(ok_address(M, B->fd)))
       
  1069     F = B->fd;
       
  1070   else {
       
  1071     CORRUPTION_ERROR_ACTION(M);
       
  1072   }
       
  1073   B->fd = P;
       
  1074   F->bk = P;
       
  1075   P->fd = F;
       
  1076   P->bk = B;
       
  1077 }
       
  1078 
       
  1079 
       
  1080 inline void RNewAllocator::insert_chunk(mstate M,mchunkptr P,size_t S)
       
  1081 {
       
  1082 	if (is_small(S))
       
  1083 		insert_small_chunk(M, P, S);
       
  1084 	else{
       
  1085 		tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S);
       
  1086 	 }
       
  1087 }
       
  1088 
       
  1089 inline void RNewAllocator::unlink_large_chunk(mstate M,tchunkptr X)
       
  1090 {
       
  1091   tchunkptr XP = X->parent;
       
  1092   tchunkptr R;
       
  1093   if (X->bk != X) {
       
  1094     tchunkptr F = X->fd;
       
  1095     R = X->bk;
       
  1096     if (RTCHECK(ok_address(M, F))) {
       
  1097       F->bk = R;
       
  1098       R->fd = F;
       
  1099     }
       
  1100     else {
       
  1101       CORRUPTION_ERROR_ACTION(M);
       
  1102     }
       
  1103   }
       
  1104   else {
       
  1105     tchunkptr* RP;
       
  1106     if (((R = *(RP = &(X->child[1]))) != 0) ||
       
  1107         ((R = *(RP = &(X->child[0]))) != 0)) {
       
  1108       tchunkptr* CP;
       
  1109       while ((*(CP = &(R->child[1])) != 0) ||
       
  1110              (*(CP = &(R->child[0])) != 0)) {
       
  1111         R = *(RP = CP);
       
  1112       }
       
  1113       if (RTCHECK(ok_address(M, RP)))
       
  1114         *RP = 0;
       
  1115       else {
       
  1116         CORRUPTION_ERROR_ACTION(M);
       
  1117       }
       
  1118     }
       
  1119   }
       
  1120   if (XP != 0) {
       
  1121     tbinptr* H = treebin_at(M, X->index);
       
  1122     if (X == *H) {
       
  1123       if ((*H = R) == 0)
       
  1124         clear_treemap(M, X->index);
       
  1125     }
       
  1126     else if (RTCHECK(ok_address(M, XP))) {
       
  1127       if (XP->child[0] == X)
       
  1128         XP->child[0] = R;
       
  1129       else
       
  1130         XP->child[1] = R;
       
  1131     }
       
  1132     else
       
  1133       CORRUPTION_ERROR_ACTION(M);
       
  1134     if (R != 0) {
       
  1135       if (RTCHECK(ok_address(M, R))) {
       
  1136         tchunkptr C0, C1;
       
  1137         R->parent = XP;
       
  1138         if ((C0 = X->child[0]) != 0) {
       
  1139           if (RTCHECK(ok_address(M, C0))) {
       
  1140             R->child[0] = C0;
       
  1141             C0->parent = R;
       
  1142           }
       
  1143           else
       
  1144             CORRUPTION_ERROR_ACTION(M);
       
  1145         }
       
  1146         if ((C1 = X->child[1]) != 0) {
       
  1147           if (RTCHECK(ok_address(M, C1))) {
       
  1148             R->child[1] = C1;
       
  1149             C1->parent = R;
       
  1150           }
       
  1151           else
       
  1152             CORRUPTION_ERROR_ACTION(M);
       
  1153         }
       
  1154       }
       
  1155       else
       
  1156         CORRUPTION_ERROR_ACTION(M);
       
  1157     }
       
  1158   }
       
  1159 }
       
  1160 
       
  1161 /* Unlink a chunk from a smallbin  */
       
  1162 inline void RNewAllocator::unlink_small_chunk(mstate M, mchunkptr P,size_t S)
       
  1163 {
       
  1164   mchunkptr F = P->fd;
       
  1165   mchunkptr B = P->bk;
       
  1166   bindex_t I = small_index(S);
       
  1167   assert(P != B);
       
  1168   assert(P != F);
       
  1169   assert(chunksize(P) == small_index2size(I));
       
  1170   if (F == B)
       
  1171     clear_smallmap(M, I);
       
  1172   else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&
       
  1173                    (B == smallbin_at(M,I) || ok_address(M, B)))) {
       
  1174     F->bk = B;
       
  1175     B->fd = F;
       
  1176   }
       
  1177   else {
       
  1178     CORRUPTION_ERROR_ACTION(M);
       
  1179   }
       
  1180 }
       
  1181 
       
  1182 inline void RNewAllocator::unlink_chunk(mstate M, mchunkptr P, size_t S)
       
  1183 {
       
  1184   if (is_small(S))
       
  1185 	unlink_small_chunk(M, P, S);
       
  1186   else
       
  1187   {
       
  1188 	  tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP);
       
  1189   }
       
  1190 }
       
  1191 
       
  1192 inline void RNewAllocator::compute_tree_index(size_t S, bindex_t& I)
       
  1193 {
       
  1194   size_t X = S >> TREEBIN_SHIFT;
       
  1195   if (X == 0)
       
  1196     I = 0;
       
  1197   else if (X > 0xFFFF)
       
  1198     I = NTREEBINS-1;
       
  1199   else {
       
  1200     unsigned int Y = (unsigned int)X;
       
  1201     unsigned int N = ((Y - 0x100) >> 16) & 8;
       
  1202     unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
       
  1203     N += K;
       
  1204     N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
       
  1205     K = 14 - N + ((Y <<= K) >> 15);
       
  1206     I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));
       
  1207   }
       
  1208 }
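        /* Worked example (assuming TREEBIN_SHIFT is 8, as in stock dlmalloc, which this port
           appears to follow): for S = 6144, X = S >> 8 = 24, the bit tricks above compute
           K = 4 (the index of X's highest set bit), and
           I = (4 << 1) + ((6144 >> 11) & 1) = 8 + 1 = 9, i.e. treebin 9. */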
       
  1209 
       
  1210 /* ------------------------- Operations on trees ------------------------- */
       
  1211 
       
  1212 /* Insert chunk into tree */
       
  1213 inline void RNewAllocator::insert_large_chunk(mstate M,tchunkptr X,size_t S)
       
  1214 {
       
  1215   tbinptr* H;
       
  1216   bindex_t I;
       
  1217   compute_tree_index(S, I);
       
  1218   H = treebin_at(M, I);
       
  1219   X->index = I;
       
  1220   X->child[0] = X->child[1] = 0;
       
  1221   if (!treemap_is_marked(M, I)) {
       
  1222     mark_treemap(M, I);
       
  1223     *H = X;
       
  1224     X->parent = (tchunkptr)H;
       
  1225     X->fd = X->bk = X;
       
  1226   }
       
  1227   else {
       
  1228     tchunkptr T = *H;
       
  1229     size_t K = S << leftshift_for_tree_index(I);
       
  1230     for (;;) {
       
  1231       if (chunksize(T) != S) {
       
  1232         tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);
       
  1233         K <<= 1;
       
  1234         if (*C != 0)
       
  1235           T = *C;
       
  1236         else if (RTCHECK(ok_address(M, C))) {
       
  1237           *C = X;
       
  1238           X->parent = T;
       
  1239           X->fd = X->bk = X;
       
  1240           break;
       
  1241         }
       
  1242         else {
       
  1243           CORRUPTION_ERROR_ACTION(M);
       
  1244           break;
       
  1245         }
       
  1246       }
       
  1247       else {
       
  1248         tchunkptr F = T->fd;
       
  1249         if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {
       
  1250           T->fd = F->bk = X;
       
  1251           X->fd = F;
       
  1252           X->bk = T;
       
  1253           X->parent = 0;
       
  1254           break;
       
  1255         }
       
  1256         else {
       
  1257           CORRUPTION_ERROR_ACTION(M);
       
  1258           break;
       
  1259         }
       
  1260       }
       
  1261     }
       
  1262   }
       
  1263 }
       
  1264 
       
  1265 /*
       
  1266   Unlink steps:
       
  1267 
       
  1268   1. If x is a chained node, unlink it from its same-sized fd/bk links
       
  1269      and choose its bk node as its replacement.
       
  1270   2. If x was the last node of its size, but not a leaf node, it must
       
  1271      be replaced with a leaf node (not merely one with an open left or
       
  1272      right), to make sure that lefts and rights of descendents
       
  1273      correspond properly to bit masks.  We use the rightmost descendent
       
  1274      of x.  We could use any other leaf, but this is easy to locate and
       
  1275      tends to counteract removal of leftmosts elsewhere, and so keeps
       
  1276      paths shorter than minimally guaranteed.  This doesn't loop much
       
  1277      because on average a node in a tree is near the bottom.
       
  1278   3. If x is the base of a chain (i.e., has parent links) relink
       
  1279      x's parent and children to x's replacement (or null if none).
       
  1280 */
       
  1281 
       
  1282 /* Replace dv node, binning the old one */
       
  1283 /* Used only when dvsize known to be small */
       
  1284 inline void RNewAllocator::replace_dv(mstate M, mchunkptr P, size_t S)
       
  1285 {
       
  1286   size_t DVS = M->dvsize;
       
  1287   if (DVS != 0) {
       
  1288     mchunkptr DV = M->dv;
       
  1289     assert(is_small(DVS));
       
  1290     insert_small_chunk(M, DV, DVS);
       
  1291   }
       
  1292   M->dvsize = S;
       
  1293   M->dv = P;
       
  1294 }
       
  1295 
       
  1296 inline void RNewAllocator::compute_bit2idx(binmap_t X,bindex_t& I)
       
  1297 {
       
  1298 	unsigned int Y = X - 1;
       
  1299 	unsigned int K = Y >> (16-4) & 16;
       
  1300 	unsigned int N = K;        Y >>= K;
       
  1301 	N += K = Y >> (8-3) &  8;  Y >>= K;
       
  1302 	N += K = Y >> (4-2) &  4;  Y >>= K;
       
  1303 	N += K = Y >> (2-1) &  2;  Y >>= K;
       
  1304 	N += K = Y >> (1-0) &  1;  Y >>= K;
       
  1305 	I = (bindex_t)(N + Y);
       
  1306 }
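        /* Worked example: for X = 0x20 (only bit 5 set), Y starts as 0x1F; the shift steps
           accumulate N = 4 and leave Y = 1, so I = N + Y = 5, the index of the set bit, which
           is what callers such as tmalloc_small() expect from the treemap/smallmap bitmaps. */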
       
  1307 
       
  1308 void RNewAllocator::add_segment(mstate m, TUint8* tbase, size_t tsize, flag_t mmapped) {
       
  1309   /* Determine locations and sizes of segment, fenceposts, old top */
       
  1310   TUint8* old_top = (TUint8*)m->top;
       
  1311   msegmentptr oldsp = segment_holding(m, old_top);
       
  1312   TUint8* old_end = oldsp->base + oldsp->size;
       
  1313   size_t ssize = pad_request(sizeof(struct malloc_segment));
       
  1314   TUint8* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
       
  1315   size_t offset = align_offset(chunk2mem(rawsp));
       
  1316   TUint8* asp = rawsp + offset;
       
  1317   TUint8* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
       
  1318   mchunkptr sp = (mchunkptr)csp;
       
  1319   msegmentptr ss = (msegmentptr)(chunk2mem(sp));
       
  1320   mchunkptr tnext = chunk_plus_offset(sp, ssize);
       
  1321   mchunkptr p = tnext;
       
  1322   int nfences = 0;
       
  1323 
       
  1324   /* reset top to new space */
       
  1325   init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
       
  1326 
       
  1327   /* Set up segment record */
       
  1328   assert(is_aligned(ss));
       
  1329   set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
       
  1330   *ss = m->seg; /* Push current record */
       
  1331   m->seg.base = tbase;
       
  1332   m->seg.size = tsize;
       
  1333   m->seg.sflags = mmapped;
       
  1334   m->seg.next = ss;
       
  1335 
       
  1336   /* Insert trailing fenceposts */
       
  1337   for (;;) {
       
  1338     mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
       
  1339     p->head = FENCEPOST_HEAD;
       
  1340     ++nfences;
       
  1341     if ((TUint8*)(&(nextp->head)) < old_end)
       
  1342       p = nextp;
       
  1343     else
       
  1344       break;
       
  1345   }
       
  1346   assert(nfences >= 2);
       
  1347 
       
  1348   /* Insert the rest of old top into a bin as an ordinary free chunk */
       
  1349   if (csp != old_top) {
       
  1350     mchunkptr q = (mchunkptr)old_top;
       
  1351     size_t psize = csp - old_top;
       
  1352     mchunkptr tn = chunk_plus_offset(q, psize);
       
  1353     set_free_with_pinuse(q, psize, tn);
       
  1354     insert_chunk(m, q, psize);
       
  1355   }
       
  1356 
       
  1357   check_top_chunk(m, m->top);
       
  1358 }
       
  1359 
       
  1360 
       
  1361 void* RNewAllocator::prepend_alloc(mstate m, TUint8* newbase, TUint8* oldbase,
       
  1362                            size_t nb) {
       
  1363   mchunkptr p = align_as_chunk(newbase);
       
  1364   mchunkptr oldfirst = align_as_chunk(oldbase);
       
  1365   size_t psize = (TUint8*)oldfirst - (TUint8*)p;
       
  1366   mchunkptr q = chunk_plus_offset(p, nb);
       
  1367   size_t qsize = psize - nb;
       
  1368   set_size_and_pinuse_of_inuse_chunk(m, p, nb);
       
  1369 
       
  1370   assert((TUint8*)oldfirst > (TUint8*)q);
       
  1371   assert(pinuse(oldfirst));
       
  1372   assert(qsize >= MIN_CHUNK_SIZE);
       
  1373 
       
  1374   /* consolidate remainder with first chunk of old base */
       
  1375   if (oldfirst == m->top) {
       
  1376     size_t tsize = m->topsize += qsize;
       
  1377     m->top = q;
       
  1378     q->head = tsize | PINUSE_BIT;
       
  1379     check_top_chunk(m, q);
       
  1380   }
       
  1381   else if (oldfirst == m->dv) {
       
  1382     size_t dsize = m->dvsize += qsize;
       
  1383     m->dv = q;
       
  1384     set_size_and_pinuse_of_free_chunk(q, dsize);
       
  1385   }
       
  1386   else {
       
  1387     if (!cinuse(oldfirst)) {
       
  1388       size_t nsize = chunksize(oldfirst);
       
  1389       unlink_chunk(m, oldfirst, nsize);
       
  1390       oldfirst = chunk_plus_offset(oldfirst, nsize);
       
  1391       qsize += nsize;
       
  1392     }
       
  1393     set_free_with_pinuse(q, qsize, oldfirst);
       
  1394     insert_chunk(m, q, qsize);
       
  1395     check_free_chunk(m, q);
       
  1396   }
       
  1397 
       
  1398   check_malloced_chunk(m, chunk2mem(p), nb);
       
  1399   return chunk2mem(p);
       
  1400 }
       
  1401 
       
  1402 void* RNewAllocator::mmap_alloc(mstate m, size_t nb) {
       
  1403   size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
       
  1404   if (mmsize > nb) {     /* Check for wrap around 0 */
       
  1405     TUint8* mm = (TUint8*)(DIRECT_MMAP(mmsize));
       
  1406     if (mm != CMFAIL) {
       
  1407       size_t offset = align_offset(chunk2mem(mm));
       
  1408       size_t psize = mmsize - offset - MMAP_FOOT_PAD;
       
  1409       mchunkptr p = (mchunkptr)(mm + offset);
       
  1410       p->prev_foot = offset | IS_MMAPPED_BIT;
       
  1411       (p)->head = (psize|CINUSE_BIT);
       
  1412       mark_inuse_foot(m, p, psize);
       
  1413       chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
       
  1414       chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
       
  1415 
       
  1416       if (mm < m->least_addr)
       
  1417         m->least_addr = mm;
       
  1418       if ((m->footprint += mmsize) > m->max_footprint)
       
  1419         m->max_footprint = m->footprint;
       
  1420       assert(is_aligned(chunk2mem(p)));
       
  1421       check_mmapped_chunk(m, p);
       
  1422       return chunk2mem(p);
       
  1423     }
       
  1424   }
       
  1425   return 0;
       
  1426 }
       
  1427 
       
  1428 	int RNewAllocator::sys_trim(mstate m, size_t pad)
       
  1429 	{
       
  1430 	  size_t released = 0;
       
  1431 	  if (pad < MAX_REQUEST && is_initialized(m)) {
       
  1432 	    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
       
  1433 
       
  1434 	    if (m->topsize > pad) {
       
  1435 	      /* Shrink top space in granularity-size units, keeping at least one */
       
  1436 	      size_t unit = mparams.granularity;
       
  1437 	      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit;
       
  1438 	      msegmentptr sp = segment_holding(m, (TUint8*)m->top);
       
  1439 
       
  1440 	      if (!is_extern_segment(sp)) {
       
  1441 	        if (is_mmapped_segment(sp)) {
       
  1442 	          if (HAVE_MMAP &&
       
  1443 	              sp->size >= extra &&
       
  1444 	              !has_segment_link(m, sp)) { /* can't shrink if pinned */
       
  1445 	            size_t newsize = sp->size - extra;
       
  1446 	            /* Prefer mremap, fall back to munmap */
       
  1447 	            if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
       
  1448 	                (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
       
  1449 	              released = extra;
       
  1450 	            }
       
  1451 	          }
       
  1452 	        }
       
  1453 	        else if (HAVE_MORECORE) {
       
  1454 	          if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
       
  1455 	            extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
       
  1456 	          ACQUIRE_MORECORE_LOCK(m);
       
  1457 	          {
       
  1458 	            /* Make sure end of memory is where we last set it. */
       
  1459 	            TUint8* old_br = (TUint8*)(CALL_MORECORE(0));
       
  1460 	            if (old_br == sp->base + sp->size) {
       
  1461 	              TUint8* rel_br = (TUint8*)(CALL_MORECORE(-extra));
       
  1462 	              TUint8* new_br = (TUint8*)(CALL_MORECORE(0));
       
  1463 	              if (rel_br != CMFAIL && new_br < old_br)
       
  1464 	                released = old_br - new_br;
       
  1465 	            }
       
  1466 	          }
       
  1467 	          RELEASE_MORECORE_LOCK(m);
       
  1468 	        }
       
  1469 	      }
       
  1470 
       
  1471 	      if (released != 0) {
       
  1472 	        sp->size -= released;
       
  1473 	        m->footprint -= released;
       
  1474 	        init_top(m, m->top, m->topsize - released);
       
  1475 	        check_top_chunk(m, m->top);
       
  1476 	      }
       
  1477 	    }
       
  1478 
       
  1479 	    /* Unmap any unused mmapped segments */
       
  1480 	    if (HAVE_MMAP)
       
  1481 	      released += release_unused_segments(m);
       
  1482 
       
  1483 	    /* On failure, disable autotrim to avoid repeated failed future calls */
       
  1484 	    if (released == 0)
       
  1485 	      m->trim_check = MAX_SIZE_T;
       
  1486 	  }
       
  1487 
       
  1488 	  return (released != 0)? 1 : 0;
       
  1489 	}
       
  1490 
       
  1491 	inline int RNewAllocator::has_segment_link(mstate m, msegmentptr ss)
       
  1492 	{
       
  1493 	  msegmentptr sp = &m->seg;
       
  1494 	  for (;;) {
       
  1495 	    if ((TUint8*)sp >= ss->base && (TUint8*)sp < ss->base + ss->size)
       
  1496 	      return 1;
       
  1497 	    if ((sp = sp->next) == 0)
       
  1498 	      return 0;
       
  1499 	  }
       
  1500 	}
       
  1501 
       
  1502 	/* Unmap and unlink any mmapped segments that don't contain used chunks */
       
  1503 	size_t RNewAllocator::release_unused_segments(mstate m)
       
  1504 	{
       
  1505 	  size_t released = 0;
       
  1506 	  msegmentptr pred = &m->seg;
       
  1507 	  msegmentptr sp = pred->next;
       
  1508 	  while (sp != 0) {
       
  1509 	    TUint8* base = sp->base;
       
  1510 	    size_t size = sp->size;
       
  1511 	    msegmentptr next = sp->next;
       
  1512 	    if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
       
  1513 	      mchunkptr p = align_as_chunk(base);
       
  1514 	      size_t psize = chunksize(p);
       
  1515 	      /* Can unmap if first chunk holds entire segment and not pinned */
       
  1516 	      if (!cinuse(p) && (TUint8*)p + psize >= base + size - TOP_FOOT_SIZE) {
       
  1517 	        tchunkptr tp = (tchunkptr)p;
       
  1518 	        assert(segment_holds(sp, (TUint8*)sp));
       
  1519 	        if (p == m->dv) {
       
  1520 	          m->dv = 0;
       
  1521 	          m->dvsize = 0;
       
  1522 	        }
       
  1523 	        else {
       
  1524 	          unlink_large_chunk(m, tp);
       
  1525 	        }
       
  1526 	        if (CALL_MUNMAP(base, size) == 0) {
       
  1527 	          released += size;
       
  1528 	          m->footprint -= size;
       
  1529 	          /* unlink obsoleted record */
       
  1530 	          sp = pred;
       
  1531 	          sp->next = next;
       
  1532 	        }
       
  1533 	        else { /* back out if cannot unmap */
       
  1534 	          insert_large_chunk(m, tp, psize);
       
  1535 	        }
       
  1536 	      }
       
  1537 	    }
       
  1538 	    pred = sp;
       
  1539 	    sp = next;
       
  1540 	  }/*End of while*/
       
  1541 	  return released;
       
  1542 	}
       
  1543 	/* Realloc using mmap */
       
  1544 	inline	mchunkptr RNewAllocator::mmap_resize(mstate m, mchunkptr oldp, size_t nb)
       
  1545 	{
       
  1546 	  size_t oldsize = chunksize(oldp);
       
  1547 	  if (is_small(nb)) /* Can't shrink mmap regions below small size */
       
  1548 	    return 0;
       
  1549 	  /* Keep old chunk if big enough but not too big */
       
  1550 	  if (oldsize >= nb + SIZE_T_SIZE &&
       
  1551 	      (oldsize - nb) <= (mparams.granularity << 1))
       
  1552 	    return oldp;
       
  1553 	  else {
       
  1554 	    size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
       
  1555 	    size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
       
  1556 	    size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
       
  1557 	                                         CHUNK_ALIGN_MASK);
       
  1558 	    TUint8* cp = (TUint8*)CALL_MREMAP((char*)oldp - offset,
       
  1559 	                                  oldmmsize, newmmsize, 1);
       
  1560 	    if (cp != CMFAIL) {
       
  1561 	      mchunkptr newp = (mchunkptr)(cp + offset);
       
  1562 	      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
       
  1563 	      newp->head = (psize|CINUSE_BIT);
       
  1564 	      mark_inuse_foot(m, newp, psize);
       
  1565 	      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
       
  1566 	      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
       
  1567 
       
  1568 	      if (cp < m->least_addr)
       
  1569 	        m->least_addr = cp;
       
  1570 	      if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
       
  1571 	        m->max_footprint = m->footprint;
       
  1572 	      check_mmapped_chunk(m, newp);
       
  1573 	      return newp;
       
  1574 	    }
       
  1575 	  }
       
  1576 	  return 0;
       
  1577 	}
       
  1578 
       
  1579 
       
  1580 void RNewAllocator::Init_Dlmalloc(size_t capacity, int locked, size_t aTrimThreshold)
       
  1581 	{
       
  1582 		memset(gm,0,sizeof(malloc_state));
       
  1583 		init_mparams(aTrimThreshold); /* Ensure pagesize etc initialized */
       
  1584 		// The maximum amount that can be allocated can be calculated as:-
       
  1585 		// 2^(8*sizeof(size_t)) - sizeof(malloc_state) - TOP_FOOT_SIZE - page size (all suitably padded)
       
  1586 		// If the capacity exceeds this, no allocation will be done.
       
  1587 		gm->seg.base = gm->least_addr = iBase;
       
  1588 		gm->seg.size = capacity;
       
  1589 		gm->seg.sflags = !IS_MMAPPED_BIT;
       
  1590 		set_lock(gm, locked);
       
  1591 		gm->magic = mparams.magic;
       
  1592 		init_bins(gm);
       
  1593 		init_top(gm, (mchunkptr)iBase, capacity - TOP_FOOT_SIZE);
       
  1594 	}
       
  1595 
       
  1596 void* RNewAllocator::dlmalloc(size_t bytes) {
       
  1597   /*
       
  1598      Basic algorithm:
       
  1599      If a small request (< 256 bytes minus per-chunk overhead):
       
  1600        1. If one exists, use a remainderless chunk in associated smallbin.
       
  1601           (Remainderless means that there are too few excess bytes to
       
  1602           represent as a chunk.)
       
  1603        2. If it is big enough, use the dv chunk, which is normally the
       
  1604           chunk adjacent to the one used for the most recent small request.
       
  1605        3. If one exists, split the smallest available chunk in a bin,
       
  1606           saving remainder in dv.
       
  1607        4. If it is big enough, use the top chunk.
       
  1608        5. If available, get memory from system and use it
       
  1609      Otherwise, for a large request:
       
  1610        1. Find the smallest available binned chunk that fits, and use it
       
  1611           if it is better fitting than dv chunk, splitting if necessary.
       
  1612        2. If better fitting than any binned chunk, use the dv chunk.
       
  1613        3. If it is big enough, use the top chunk.
       
  1614        4. If request size >= mmap threshold, try to directly mmap this chunk.
       
  1615        5. If available, get memory from system and use it
       
  1616 
       
  1617      The ugly gotos here ensure that postaction occurs along all paths.
       
  1618   */
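       /* Illustrative sketch only (assumes a 32-bit build without FOOTERS, i.e. the
          stock dlmalloc constants SIZE_T_SIZE == 4 and CHUNK_ALIGN_MASK == 7): a
          24-byte request pads to a 32-byte chunk (24 + 4 bytes overhead, rounded up
          to 8-byte alignment), maps to smallbin index 4, and is normally served by
          the remainderless-fit path below when that bin or its neighbour is populated. */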
       
  1619   if (!PREACTION(gm)) {
       
  1620     void* mem;
       
  1621     size_t nb;
       
  1622     if (bytes <= MAX_SMALL_REQUEST) {
       
  1623       bindex_t idx;
       
  1624       binmap_t smallbits;
       
  1625       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
       
  1626       idx = small_index(nb);
       
  1627       smallbits = gm->smallmap >> idx;
       
  1628 
       
  1629       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
       
  1630         mchunkptr b, p;
       
  1631         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
       
  1632         b = smallbin_at(gm, idx);
       
  1633         p = b->fd;
       
  1634         assert(chunksize(p) == small_index2size(idx));
       
  1635         unlink_first_small_chunk(gm, b, p, idx);
       
  1636         set_inuse_and_pinuse(gm, p, small_index2size(idx));
       
  1637         mem = chunk2mem(p);
       
  1638         check_malloced_chunk(gm, mem, nb);
       
  1639         goto postaction;
       
  1640       }
       
  1641 
       
  1642       else if (nb > gm->dvsize) {
       
  1643         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
       
  1644           mchunkptr b, p, r;
       
  1645           size_t rsize;
       
  1646           bindex_t i;
       
  1647           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
       
  1648           binmap_t leastbit = least_bit(leftbits);
       
  1649           compute_bit2idx(leastbit, i);
       
  1650           b = smallbin_at(gm, i);
       
  1651           p = b->fd;
       
  1652           assert(chunksize(p) == small_index2size(i));
       
  1653           unlink_first_small_chunk(gm, b, p, i);
       
  1654           rsize = small_index2size(i) - nb;
       
  1655           /* Fit here cannot be remainderless if 4-byte sizes */
       
  1656           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
       
  1657             set_inuse_and_pinuse(gm, p, small_index2size(i));
       
  1658           else {
       
  1659             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
       
  1660             r = chunk_plus_offset(p, nb);
       
  1661             set_size_and_pinuse_of_free_chunk(r, rsize);
       
  1662             replace_dv(gm, r, rsize);
       
  1663           }
       
  1664           mem = chunk2mem(p);
       
  1665           check_malloced_chunk(gm, mem, nb);
       
  1666           goto postaction;
       
  1667         }
       
  1668 
       
  1669         else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
       
  1670           check_malloced_chunk(gm, mem, nb);
       
  1671           goto postaction;
       
  1672         }
       
  1673       }
       
  1674     }
       
  1675     else if (bytes >= MAX_REQUEST)
       
  1676       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
       
  1677     else {
       
  1678       nb = pad_request(bytes);
       
  1679       if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
       
  1680         check_malloced_chunk(gm, mem, nb);
       
  1681         goto postaction;
       
  1682       }
       
  1683     }
       
  1684 
       
  1685     if (nb <= gm->dvsize) {
       
  1686       size_t rsize = gm->dvsize - nb;
       
  1687       mchunkptr p = gm->dv;
       
  1688       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
       
  1689         mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
       
  1690         gm->dvsize = rsize;
       
  1691         set_size_and_pinuse_of_free_chunk(r, rsize);
       
  1692         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
       
  1693       }
       
  1694       else { /* exhaust dv */
       
  1695         size_t dvs = gm->dvsize;
       
  1696         gm->dvsize = 0;
       
  1697         gm->dv = 0;
       
  1698         set_inuse_and_pinuse(gm, p, dvs);
       
  1699       }
       
  1700       mem = chunk2mem(p);
       
  1701       check_malloced_chunk(gm, mem, nb);
       
  1702       goto postaction;
       
  1703     }
       
  1704 
       
  1705     else if (nb < gm->topsize) { /* Split top */
       
  1706       size_t rsize = gm->topsize -= nb;
       
  1707       mchunkptr p = gm->top;
       
  1708       mchunkptr r = gm->top = chunk_plus_offset(p, nb);
       
  1709       r->head = rsize | PINUSE_BIT;
       
  1710       set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
       
  1711       mem = chunk2mem(p);
       
  1712       check_top_chunk(gm, gm->top);
       
  1713       check_malloced_chunk(gm, mem, nb);
       
  1714       goto postaction;
       
  1715     }
       
  1716 
       
  1717     mem = sys_alloc(gm, nb);
       
  1718 
       
  1719   postaction:
       
  1720     POSTACTION(gm);
       
  1721     return mem;
       
  1722   }
       
  1723 
       
  1724   return 0;
       
  1725 }
       
  1726 
       
  1727 void RNewAllocator::dlfree(void* mem) {
       
  1728   /*
       
  1729      Consolidate freed chunks with preceding or succeeding bordering
       
  1730      free chunks, if they exist, and then place in a bin.  Intermixed
       
  1731      with special cases for top, dv, mmapped chunks, and usage errors.
       
  1732   */
       
  1733 
       
  1734 	if (mem != 0)
       
  1735 	{
       
  1736 		mchunkptr p  = mem2chunk(mem);
       
  1737 #if FOOTERS
       
  1738 		mstate fm = get_mstate_for(p);
       
  1739 		if (!ok_magic(fm))
       
  1740 		{
       
  1741 			USAGE_ERROR_ACTION(fm, p);
       
  1742 			return;
       
  1743 		}
       
  1744 #else /* FOOTERS */
       
  1745 #define fm gm
       
  1746 #endif /* FOOTERS */
       
  1747 
       
  1748 		if (!PREACTION(fm))
       
  1749 		{
       
  1750 			check_inuse_chunk(fm, p);
       
  1751 			if (RTCHECK(ok_address(fm, p) && ok_cinuse(p)))
       
  1752 			{
       
  1753 				size_t psize = chunksize(p);
       
  1754 				iTotalAllocSize -= psize;			// TODO DAN
       
  1755 				mchunkptr next = chunk_plus_offset(p, psize);
       
  1756 				if (!pinuse(p))
       
  1757 				{
       
  1758 					size_t prevsize = p->prev_foot;
       
  1759 					if ((prevsize & IS_MMAPPED_BIT) != 0)
       
  1760 					{
       
  1761 						prevsize &= ~IS_MMAPPED_BIT;
       
  1762 						psize += prevsize + MMAP_FOOT_PAD;
       
  1763 						/*TInt tmp = TOP_FOOT_SIZE;
       
  1764 						TUint8* top = (TUint8*)fm->top + fm->topsize + 40;
       
  1765 						if((top == (TUint8*)p)&& fm->topsize > 4096)
       
  1766 						{
       
  1767 							fm->topsize += psize;
       
  1768 							msegmentptr sp = segment_holding(fm, (TUint8*)fm->top);
       
  1769 							sp->size+=psize;
       
  1770 							if (should_trim(fm, fm->topsize))
       
  1771 								sys_trim(fm, 0);
       
  1772  							goto postaction;
       
  1773 						}
       
  1774 						else*/
       
  1775 						{
       
  1776 							if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
       
  1777 								fm->footprint -= psize;
       
  1778 							goto postaction;
       
  1779 						}
       
  1780 					}
       
  1781 					else
       
  1782 					{
       
  1783 						mchunkptr prev = chunk_minus_offset(p, prevsize);
       
  1784 						psize += prevsize;
       
  1785 						p = prev;
       
  1786 						if (RTCHECK(ok_address(fm, prev)))
       
  1787 						{ /* consolidate backward */
       
  1788 							if (p != fm->dv)
       
  1789 							{
       
  1790 								unlink_chunk(fm, p, prevsize);
       
  1791 							}
       
  1792 							else if ((next->head & INUSE_BITS) == INUSE_BITS)
       
  1793 							{
       
  1794 								fm->dvsize = psize;
       
  1795 								set_free_with_pinuse(p, psize, next);
       
  1796 								goto postaction;
       
  1797 							}
       
  1798 						}
       
  1799 						else
       
  1800 							goto erroraction;
       
  1801 					}
       
  1802 				}
       
  1803 
       
  1804 				if (RTCHECK(ok_next(p, next) && ok_pinuse(next)))
       
  1805 				{
       
  1806 					if (!cinuse(next))
       
  1807 					{  /* consolidate forward */
       
  1808 						if (next == fm->top)
       
  1809 						{
       
  1810 							size_t tsize = fm->topsize += psize;
       
  1811 							fm->top = p;
       
  1812 							p->head = tsize | PINUSE_BIT;
       
  1813 							if (p == fm->dv)
       
  1814 							{
       
  1815 								fm->dv = 0;
       
  1816 								fm->dvsize = 0;
       
  1817 							}
       
  1818 							if (should_trim(fm, tsize))
       
  1819 								sys_trim(fm, 0);
       
  1820 							goto postaction;
       
  1821 						}
       
  1822 						else if (next == fm->dv)
       
  1823 						{
       
  1824 							size_t dsize = fm->dvsize += psize;
       
  1825 							fm->dv = p;
       
  1826 							set_size_and_pinuse_of_free_chunk(p, dsize);
       
  1827 							goto postaction;
       
  1828 						}
       
  1829 						else
       
  1830 						{
       
  1831 							size_t nsize = chunksize(next);
       
  1832 							psize += nsize;
       
  1833 							unlink_chunk(fm, next, nsize);
       
  1834 							set_size_and_pinuse_of_free_chunk(p, psize);
       
  1835 							if (p == fm->dv)
       
  1836 							{
       
  1837 								fm->dvsize = psize;
       
  1838 								goto postaction;
       
  1839 							}
       
  1840 						}
       
  1841 					}
       
  1842 					else
       
  1843 						set_free_with_pinuse(p, psize, next);
       
  1844 					insert_chunk(fm, p, psize);
       
  1845 					check_free_chunk(fm, p);
       
  1846 					goto postaction;
       
  1847 				}
       
  1848 			}
       
  1849 erroraction:
       
  1850     	USAGE_ERROR_ACTION(fm, p);
       
  1851 postaction:
       
  1852     	POSTACTION(fm);
       
  1853 		}
       
  1854 	}
       
  1855 #if !FOOTERS
       
  1856 #undef fm
       
  1857 #endif /* FOOTERS */
       
  1858 }
       
  1859 
       
  1860 void* RNewAllocator::dlrealloc(void* oldmem, size_t bytes) {
       
  1861   if (oldmem == 0)
       
  1862     return dlmalloc(bytes);
       
  1863 #ifdef REALLOC_ZERO_BYTES_FREES
       
  1864   if (bytes == 0) {
       
  1865     dlfree(oldmem);
       
  1866     return 0;
       
  1867   }
       
  1868 #endif /* REALLOC_ZERO_BYTES_FREES */
       
  1869   else {
       
  1870 #if ! FOOTERS
       
  1871     mstate m = gm;
       
  1872 #else /* FOOTERS */
       
  1873     mstate m = get_mstate_for(mem2chunk(oldmem));
       
  1874     if (!ok_magic(m)) {
       
  1875       USAGE_ERROR_ACTION(m, oldmem);
       
  1876       return 0;
       
  1877     }
       
  1878 #endif /* FOOTERS */
       
  1879     return internal_realloc(m, oldmem, bytes);
       
  1880   }
       
  1881 }
       
  1882 
       
  1883 
       
  1884 int RNewAllocator::dlmalloc_trim(size_t pad) {
       
  1885   int result = 0;
       
  1886   if (!PREACTION(gm)) {
       
  1887     result = sys_trim(gm, pad);
       
  1888     POSTACTION(gm);
       
  1889   }
       
  1890   return result;
       
  1891 }
       
  1892 
       
  1893 size_t RNewAllocator::dlmalloc_footprint(void) {
       
  1894   return gm->footprint;
       
  1895 }
       
  1896 
       
  1897 size_t RNewAllocator::dlmalloc_max_footprint(void) {
       
  1898   return gm->max_footprint;
       
  1899 }
       
  1900 
       
  1901 #if !NO_MALLINFO
       
  1902 struct mallinfo RNewAllocator::dlmallinfo(void) {
       
  1903   return internal_mallinfo(gm);
       
  1904 }
       
  1905 #endif /* NO_MALLINFO */
       
  1906 
       
  1907 void RNewAllocator::dlmalloc_stats() {
       
  1908   internal_malloc_stats(gm);
       
  1909 }
       
  1910 
       
  1911 int RNewAllocator::dlmallopt(int param_number, int value) {
       
  1912   return change_mparam(param_number, value);
       
  1913 }
       
  1914 
       
  1915 //inline slab* slab::slabfor(void* p)
       
  1916 inline slab* slab::slabfor( const void* p)
       
  1917 	{return (slab*)(floor(p, slabsize));}
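       // slabfor() rounds an arbitrary cell pointer down to the start of its containing
       // slab, i.e. to the nearest slabsize boundary (1KB, per the slab_free() comment
       // further down). For example, a cell at slab_base + 0x2A0 maps back to slab_base.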
       
  1918 
       
  1919 
       
  1920 void RNewAllocator::tree_remove(slab* s)
       
  1921 {
       
  1922 	slab** r = s->parent;
       
  1923 	slab* c1 = s->child1;
       
  1924 	slab* c2 = s->child2;
       
  1925 	for (;;)
       
  1926 	{
       
  1927 		if (!c2)
       
  1928 		{
       
  1929 			*r = c1;
       
  1930 			if (c1)
       
  1931 				c1->parent = r;
       
  1932 			return;
       
  1933 		}
       
  1934 		if (!c1)
       
  1935 		{
       
  1936 			*r = c2;
       
  1937 			c2->parent = r;
       
  1938 			return;
       
  1939 		}
       
  1940 		if (c1 > c2)
       
  1941 		{
       
  1942 			slab* c3 = c1;
       
  1943 			c1 = c2;
       
  1944 			c2 = c3;
       
  1945 		}
       
  1946 		slab* newc2 = c1->child2;
       
  1947 		*r = c1;
       
  1948 		c1->parent = r;
       
  1949 		c1->child2 = c2;
       
  1950 		c2->parent = &c1->child2;
       
  1951 		s = c1;
       
  1952 		c1 = s->child1;
       
  1953 		c2 = newc2;
       
  1954 		r = &s->child1;
       
  1955 	}
       
  1956 }
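       // Note (inferred from the code): tree_insert() below links a slab into the same
       // address-ordered structure that tree_remove() unlinks from. Every node has a
       // higher address than its parent, so the root (*r) is always the lowest-addressed
       // slab - which is what lets allocnewslab() and slab_allocate() prefer low addresses.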
       
  1957 void RNewAllocator::tree_insert(slab* s,slab** r)
       
  1958 	{
       
  1959 		slab* n = *r;
       
  1960 		for (;;)
       
  1961 		{
       
  1962 			if (!n)
       
  1963 			{	// tree empty
       
  1964 				*r = s;
       
  1965 				s->parent = r;
       
  1966 				s->child1 = s->child2 = 0;
       
  1967 				break;
       
  1968 			}
       
  1969 			if (s < n)
       
  1970 			{	// insert between parent and n
       
  1971 				*r = s;
       
  1972 				s->parent = r;
       
  1973 				s->child1 = n;
       
  1974 				s->child2 = 0;
       
  1975 				n->parent = &s->child1;
       
  1976 				break;
       
  1977 			}
       
  1978 			slab* c1 = n->child1;
       
  1979 			slab* c2 = n->child2;
       
  1980 			if (c1 < c2)
       
  1981 			{
       
  1982 				r = &n->child1;
       
  1983 				n = c1;
       
  1984 			}
       
  1985 			else
       
  1986 			{
       
  1987 				r = &n->child2;
       
  1988 				n = c2;
       
  1989 			}
       
  1990 		}
       
  1991 	}
       
  1992 void* RNewAllocator::allocnewslab(slabset& allocator)
       
  1993 //
       
  1994 // Acquire and initialise a new slab, returning a cell from the slab
       
  1995 // The strategy is:
       
  1996 // 1. Use the lowest address free slab, if available. This is done by using the lowest slab
       
  1997 //    in the page at the root of the partial_page heap (which is address ordered). If the
       
  1998 //    page has no free slabs left after this, remove it from the partial_page heap.
       
  1999 // 2. Allocate a new page for slabs if no empty slabs are available
       
  2000 //
       
  2001 {
       
  2002 	page* p = page::pagefor(partial_page);
       
  2003 	if (!p)
       
  2004 		return allocnewpage(allocator);
       
  2005 
       
  2006 	unsigned h = p->slabs[0].header;
       
  2007 	unsigned pagemap = header_pagemap(h);
       
  2008 	ASSERT(&p->slabs[hibit(pagemap)] == partial_page);
       
  2009 
       
  2010 	unsigned slabix = lowbit(pagemap);
       
  2011 	p->slabs[0].header = h &~ (0x100<<slabix);
       
  2012 	if (!(pagemap &~ (1<<slabix)))
       
  2013 	{
       
  2014 		tree_remove(partial_page);	// last free slab in page
       
  2015 	}
       
  2016 	return initnewslab(allocator,&p->slabs[slabix]);
       
  2017 }
       
  2018 
       
  2019 /** Definition of this function is not present in the prototype code ***/
       
  2020 #if 0
       
  2021 void RNewAllocator::partial_insert(slab* s)
       
  2022 	{
       
  2023 		// slab has had first cell freed and needs to be linked back into partial tree
       
  2024 		slabset& ss = slaballoc[sizemap[s->clz]];
       
  2025 
       
  2026 		ASSERT(s->used == slabfull);
       
  2027 		s->used = ss.fulluse - s->clz;		// full-1 loading
       
  2028 		tree_insert(s,&ss.partial);
       
  2029 		checktree(ss.partial);
       
  2030 	}
       
  2031 /** Definition of this function is not present in the prototype code ***/
       
  2032 #endif
       
  2033 
       
  2034 void* RNewAllocator::allocnewpage(slabset& allocator)
       
  2035 //
       
  2036 // Acquire and initialise a new page, returning a cell from a new slab
       
  2037 // The partial_page tree is empty (otherwise we'd have used a slab from there)
       
  2038 // The partial_page link is put in the highest addressed slab in the page, and the
       
  2039 // lowest addressed slab is used to fulfill the allocation request
       
  2040 //
       
  2041 {
       
  2042 	page* p	 = spare_page;
       
  2043 	if (p)
       
  2044 		spare_page = 0;
       
  2045 	else
       
  2046 	{
       
  2047 		p = static_cast<page*>(map(0,pagesize));
       
  2048 		if (!p)
       
  2049 			return 0;
       
  2050 	}
       
  2051 	ASSERT(p == floor(p,pagesize));
       
  2052 	p->slabs[0].header = ((1<<3) + (1<<2) + (1<<1))<<8;		// set pagemap: slabs 1-3 free, slab 0 is handed out below
       
  2053 	p->slabs[3].parent = &partial_page;
       
  2054 	p->slabs[3].child1 = p->slabs[3].child2 = 0;
       
  2055 	partial_page = &p->slabs[3];
       
  2056 	return initnewslab(allocator,&p->slabs[0]);
       
  2057 }
       
  2058 
       
  2059 void RNewAllocator::freepage(page* p)
       
  2060 //
       
  2061 // Release an unused page to the OS
       
  2062 // A single page is cached for reuse to reduce thrashing
       
  2063 // the OS allocator.
       
  2064 //
       
  2065 {
       
  2066 	ASSERT(ceiling(p,pagesize) == p);
       
  2067 	if (!spare_page)
       
  2068 	{
       
  2069 		spare_page = p;
       
  2070 		return;
       
  2071 	}
       
  2072 	unmap(p,pagesize);
       
  2073 }
       
  2074 
       
  2075 void RNewAllocator::freeslab(slab* s)
       
  2076 //
       
  2077 // Release an empty slab to the slab manager
       
  2078 // The strategy is:
       
  2079 // 1. The page containing the slab is checked to see the state of the other slabs in the page by
       
  2080 //    inspecting the pagemap field in the header of the first slab in the page.
       
  2081 // 2. The pagemap is updated to indicate the new unused slab
       
  2082 // 3. If this is the only unused slab in the page then the slab header is used to add the page to
       
  2083 //    the partial_page tree/heap
       
  2084 // 4. If all the slabs in the page are now unused the page is release back to the OS
       
  2085 // 5. If this slab has a higher address than the one currently used to track this page in
       
  2086 //    the partial_page heap, the linkage is moved to the new unused slab
       
  2087 //
       
  2088 {
       
  2089 	tree_remove(s);
       
  2090 	checktree(*s->parent);
       
  2091 	ASSERT(header_usedm4(s->header) == header_size(s->header)-4);
       
  2092 	CHECK(s->header |= 0xFF00000);			// illegal value for debug purposes
       
  2093 	page* p = page::pagefor(s);
       
  2094 	unsigned h = p->slabs[0].header;
       
  2095 	int slabix = s - &p->slabs[0];
       
  2096 	unsigned pagemap = header_pagemap(h);
       
  2097 	p->slabs[0].header = h | (0x100<<slabix);
       
  2098 	if (pagemap == 0)
       
  2099 	{	// page was full before, use this slab as link in empty heap
       
  2100 		tree_insert(s, &partial_page);
       
  2101 	}
       
  2102 	else
       
  2103 	{	// find the current empty-link slab
       
  2104 		slab* sl = &p->slabs[hibit(pagemap)];
       
  2105 		pagemap ^= (1<<slabix);
       
  2106 		if (pagemap == 0xf)
       
  2107 		{	// page is now empty so recycle page to os
       
  2108 			tree_remove(sl);
       
  2109 			freepage(p);
       
  2110 			return;
       
  2111 		}
       
  2112 		// ensure the free list link is in highest address slab in page
       
  2113 		if (s > sl)
       
  2114 		{	// replace current link with new one. Address-order tree so position stays the same
       
  2115 			slab** r = sl->parent;
       
  2116 			slab* c1 = sl->child1;
       
  2117 			slab* c2 = sl->child2;
       
  2118 			s->parent = r;
       
  2119 			s->child1 = c1;
       
  2120 			s->child2 = c2;
       
  2121 			*r = s;
       
  2122 			if (c1)
       
  2123 				c1->parent = &s->child1;
       
  2124 			if (c2)
       
  2125 				c2->parent = &s->child2;
       
  2126 		}
       
  2127 		CHECK(if (s < sl) s=sl);
       
  2128 	}
       
  2129 	ASSERT(header_pagemap(p->slabs[0].header) != 0);
       
  2130 	ASSERT(hibit(header_pagemap(p->slabs[0].header)) == unsigned(s - &p->slabs[0]));
       
  2131 }
       
  2132 
       
  2133 void RNewAllocator::slab_init()
       
  2134 {
       
  2135 	slab_threshold=0;
       
  2136 	partial_page = 0;
       
  2137 	spare_page = 0;
       
  2138 	memset(&sizemap[0],0xff,sizeof(sizemap));
       
  2139 	memset(&slaballoc[0],0,sizeof(slaballoc));
       
  2140 }
       
  2141 
       
  2142 void RNewAllocator::slab_config(unsigned slabbitmap)
       
  2143 {
       
  2144 	ASSERT((slabbitmap & ~okbits) == 0);
       
  2145 	ASSERT(maxslabsize <= 60);
       
  2146 
       
  2147 	unsigned char ix = 0xff;
       
  2148 	unsigned bit = 1<<((maxslabsize>>2)-1);
       
  2149 	for (int sz = maxslabsize; sz >= 0; sz -= 4, bit >>= 1)
       
  2150 	{
       
  2151 		if (slabbitmap & bit)
       
  2152 		{
       
  2153 			if (ix == 0xff)
       
  2154 				slab_threshold=sz+1;
       
  2155 			ix = (sz>>2)-1;
       
  2156 		}
       
  2157 		sizemap[sz>>2] = ix;
       
  2158 	}
       
  2159 }
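       /* Illustrative example (not from the original source): a slabbitmap with bits
          1, 3 and 5 set enables slab cell sizes 8, 16 and 24 bytes (bit k selects
          size 4*(k+1)). The loop above then sets slab_threshold to 25 and fills
          sizemap[] so that each 4-byte size class maps to the index of the smallest
          enabled slab size able to hold it (0xff where no slab size is suitable). */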
       
  2160 
       
  2161 void* RNewAllocator::slab_allocate(slabset& ss)
       
  2162 //
       
  2163 // Allocate a cell from the given slabset
       
  2164 // Strategy:
       
  2165 // 1. Take the partially full slab at the top of the heap (lowest address).
       
  2166 // 2. If there is no such slab, allocate from a new slab
       
  2167 // 3. If the slab has a non-empty freelist, pop the cell from the front of the list and update the slab
       
  2168 // 4. Otherwise, if the slab is not full, return the cell at the end of the currently used region of
       
  2169 //    the slab, updating the slab
       
  2170 // 5. Otherwise, release the slab from the partial tree/heap, marking it as 'floating' and go back to
       
  2171 //    step 1
       
  2172 //
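       // Slab header layout, as inferred from the bit manipulation in this function,
       // slab_free() and initnewslab() (the authoritative definitions are the header_*
       // macros in the allocator's header file):
       //   bits  0..7   free-list head: (byte offset of first free cell) >> 2, 0 if empty
       //   bits  8..11  pagemap - only meaningful in slabs[0] of each page
       //   bits 12..17  cell size in bytes
       //   bits 18..27  "usedm4": bytes in use minus 4
       //   bit  31      set while the slab is full and floating (not in a partial tree)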
       
  2173 {
       
  2174 	for (;;)
       
  2175 	{
       
  2176 		slab *s = ss.partial;
       
  2177 		if (!s)
       
  2178 			break;
       
  2179 		unsigned h = s->header;
       
  2180 		unsigned free = h & 0xff;		// extract free cell position
       
  2181 		if (free)
       
  2182 		{
       
  2183 			ASSERT(((free<<2)-sizeof(slabhdr))%header_size(h) == 0);
       
  2184 			void* p = offset(s,free<<2);
       
  2185 			free = *(unsigned char*)p;	// get next pos in free list
       
  2186 			h += (h&0x3C000)<<6;		// update usedm4
       
  2187 			h &= ~0xff;
       
  2188 			h |= free;					// update freelist
       
  2189 			s->header = h;
       
  2190 			ASSERT(header_free(h) == 0 || ((header_free(h)<<2)-sizeof(slabhdr))%header_size(h) == 0);
       
  2191 			ASSERT(header_usedm4(h) <= 0x3F8u);
       
  2192 			ASSERT((header_usedm4(h)+4)%header_size(h) == 0);
       
  2193 			return p;
       
  2194 		}
       
  2195 		unsigned h2 = h + ((h&0x3C000)<<6);
       
  2196 		if (h2 < 0xfc00000)
       
  2197 		{
       
  2198 			ASSERT((header_usedm4(h2)+4)%header_size(h2) == 0);
       
  2199 			s->header = h2;
       
  2200 			return offset(s,(h>>18) + sizeof(unsigned) + sizeof(slabhdr));
       
  2201 		}
       
  2202 		h |= 0x80000000;				// mark the slab as full-floating
       
  2203 		s->header = h;
       
  2204 		tree_remove(s);
       
  2205 		checktree(ss.partial);
       
  2206 		// go back and try the next slab...
       
  2207 	}
       
  2208 	// no partial slabs found, so allocate from a new slab
       
  2209 	return allocnewslab(ss);
       
  2210 }
       
  2211 
       
  2212 void RNewAllocator::slab_free(void* p)
       
  2213 //
       
  2214 // Free a cell from the slab allocator
       
  2215 // Strategy:
       
  2216 // 1. Find the containing slab (round down to nearest 1KB boundary)
       
  2217 // 2. Push the cell into the slab's freelist, and update the slab usage count
       
  2218 // 3. If this is the last allocated cell, free the slab to the main slab manager
       
  2219 // 4. If the slab was full-floating then insert the slab in its respective partial tree
       
  2220 //
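       // Note on the free-list encoding (inferred from this function and slab_allocate()):
       // each free cell stores in its first byte the position (byte offset >> 2) of the
       // next free cell, while the low byte of the slab header holds the position of the
       // list head; a zero head means the list is empty.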
       
  2221 {
       
  2222 	ASSERT(lowbits(p,3)==0);
       
  2223 	slab* s = slab::slabfor(p);
       
  2224 
       
  2225 	unsigned pos = lowbits(p, slabsize);
       
  2226 	unsigned h = s->header;
       
  2227 	ASSERT(header_usedm4(h) != 0x3fC);		// slab is empty already
       
  2228 	ASSERT((pos-sizeof(slabhdr))%header_size(h) == 0);
       
  2229 	*(unsigned char*)p = (unsigned char)h;
       
  2230 	h &= ~0xFF;
       
  2231 	h |= (pos>>2);
       
  2232 	unsigned size = h & 0x3C000;
       
  2233 	iTotalAllocSize -= size;		// TODO DAN
       
  2234 	if (int(h) >= 0)
       
  2235 	{
       
  2236 		h -= size<<6;
       
  2237 		if (int(h)>=0)
       
  2238 		{
       
  2239 			s->header = h;
       
  2240 			return;
       
  2241 		}
       
  2242 		freeslab(s);
       
  2243 		return;
       
  2244 	}
       
  2245 	h -= size<<6;
       
  2246 	h &= ~0x80000000;
       
  2247 	s->header = h;
       
  2248 	slabset& ss = slaballoc[(size>>14)-1];
       
  2249 	tree_insert(s,&ss.partial);
       
  2250 	checktree(ss.partial);
       
  2251 }
       
  2252 
       
  2253 void* RNewAllocator::initnewslab(slabset& allocator, slab* s)
       
  2254 //
       
  2255 // initialise an empty slab for this allocator and return the first cell
       
  2256 // pre-condition: the slabset has no partial slabs for allocation
       
  2257 //
       
  2258 {
       
  2259 	ASSERT(allocator.partial==0);
       
  2260 	TInt size = 4 + ((&allocator-&slaballoc[0])<<2);	// infer size from slab allocator address
       
  2261 	unsigned h = s->header & 0xF00;	// preserve pagemap only
       
  2262 	h |= (size<<12);					// set size
       
  2263 	h |= (size-4)<<18;					// set usedminus4 to one object minus 4
       
  2264 	s->header = h;
       
  2265 	allocator.partial = s;
       
  2266 	s->parent = &allocator.partial;
       
  2267 	s->child1 = s->child2 = 0;
       
  2268 	return offset(s,sizeof(slabhdr));
       
  2269 }
       
  2270 
       
  2271 TAny* RNewAllocator::SetBrk(TInt32 aDelta)
       
  2272 {
       
  2273 	if (iFlags & EFixedSize)
       
  2274 		return MFAIL;
       
  2275 
       
  2276 	if (aDelta < 0)
       
  2277 		{
       
  2278 		unmap(offset(iTop, aDelta), -aDelta);
       
  2279 		}
       
  2280 	else if (aDelta > 0)
       
  2281 		{
       
  2282 		if (!map(iTop, aDelta))
       
  2283 			return MFAIL;
       
  2284 		}
       
  2285 	void * p =iTop;
       
  2286 	iTop = offset(iTop, aDelta);
       
  2287 	return p;
       
  2288 }
       
  2289 
       
  2290 void* RNewAllocator::map(void* p,unsigned sz)
       
  2291 //
       
  2292 // allocate pages in the chunk
       
  2293 // if p is NULL, find and allocate the required number of pages (which must lie in the lower half)
       
  2294 // otherwise commit the pages specified
       
  2295 //
       
  2296 {
       
  2297 ASSERT(p == floor(p, pagesize));
       
  2298 ASSERT(sz == ceiling(sz, pagesize));
       
  2299 ASSERT(sz > 0);
       
  2300 
       
  2301 	if (iChunkSize + sz > iMaxLength)
       
  2302 		return 0;
       
  2303 
       
  2304 	RChunk chunk;
       
  2305 	chunk.SetHandle(iChunkHandle);
       
  2306 	if (p)
       
  2307 	{
       
  2308 		TInt r = chunk.Commit(iOffset + ptrdiff(p, this),sz);
       
  2309 		if (r < 0)
       
  2310 			return 0;
       
  2311 		//ASSERT(p = offset(this, r - iOffset));
       
  2312 	}
       
  2313 	else
       
  2314 	{
       
  2315 		TInt r = chunk.Allocate(sz);
       
  2316 		if (r < 0)
       
  2317 			return 0;
       
  2318 		if (r > iOffset)
       
  2319 		{
       
  2320 			// can't allow page allocations in DL zone
       
  2321 			chunk.Decommit(r, sz);
       
  2322 			return 0;
       
  2323 		}
       
  2324 		p = offset(this, r - iOffset);
       
  2325 	}
       
  2326 	iChunkSize += sz;
       
  2327 #ifdef TRACING_HEAPS
       
  2328 	if(iChunkSize > iHighWaterMark)
       
  2329 		{
       
  2330 			iHighWaterMark = ceiling(iChunkSize,16*pagesize);
       
  2331 
       
  2332 
       
  2333 			RChunk chunk;
       
  2334 			chunk.SetHandle(iChunkHandle);
       
  2335 			TKName chunk_name;
       
  2336 			chunk.FullName(chunk_name);
       
  2337 			BTraceContextBig(BTrace::ETest1, 4, 44, chunk_name.Ptr(), chunk_name.Size());
       
  2338 
       
  2339 			TUint32 traceData[6];
       
  2340 			traceData[0] = iChunkHandle;
       
  2341 			traceData[1] = iMinLength;
       
  2342 			traceData[2] = iMaxLength;
       
  2343 			traceData[3] = sz;
       
  2344 			traceData[4] = iChunkSize;
       
  2345 			traceData[5] = iHighWaterMark;
       
  2346 			BTraceContextN(BTrace::ETest1, 3, (TUint32)this, 33, traceData, sizeof(traceData));
       
  2347 		}
       
  2348 #endif
       
  2349 	if (iChunkSize >= slab_init_threshold)
       
  2350 	{	// set up slab system now that heap is large enough
       
  2351 		slab_config(slab_config_bits);
       
  2352 		slab_init_threshold = KMaxTUint;
       
  2353 	}
       
  2354 	return p;
       
  2355 }
       
  2356 
       
  2357 void* RNewAllocator::remap(void* p,unsigned oldsz,unsigned sz)
       
  2358 {
       
  2359 	if (oldsz > sz)
       
  2360 		{	// shrink
       
  2361 		unmap(offset(p,sz), oldsz-sz);
       
  2362 		}
       
  2363 	else if (oldsz < sz)
       
  2364 		{	// grow, try and do this in place first
       
  2365 		if (!map(offset(p, oldsz), sz-oldsz))
       
  2366 			{
       
  2367 			// need to allocate-copy-free
       
  2368 			void* newp = map(0, sz);
       
  2369 			memcpy(newp, p, oldsz);
       
  2370 			unmap(p,oldsz);
       
  2371 			return newp;
       
  2372 			}
       
  2373 		}
       
  2374 	return p;
       
  2375 }
       
  2376 
       
  2377 void RNewAllocator::unmap(void* p,unsigned sz)
       
  2378 {
       
  2379 	ASSERT(p == floor(p, pagesize));
       
  2380 	ASSERT(sz == ceiling(sz, pagesize));
       
  2381 	ASSERT(sz > 0);
       
  2382 
       
  2383 	RChunk chunk;
       
  2384 	chunk.SetHandle(iChunkHandle);
       
  2385 	TInt r = chunk.Decommit(ptrdiff(p, offset(this,-iOffset)), sz);
       
  2386 	//TInt offset = (TUint8*)p-(TUint8*)chunk.Base();
       
  2387 	//TInt r = chunk.Decommit(offset,sz);
       
  2388 
       
  2389 	ASSERT(r >= 0);
       
  2390 	iChunkSize -= sz;
       
  2391 }
       
  2392 
       
  2393 void RNewAllocator::paged_init(unsigned pagepower)
       
  2394 	{
       
  2395 		if (pagepower == 0)
       
  2396 			pagepower = 31;
       
  2397 		else if (pagepower < minpagepower)
       
  2398 			pagepower = minpagepower;
       
  2399 		page_threshold = pagepower;
       
  2400 		for (int i=0;i<npagecells;++i)
       
  2401 		{
       
  2402 			pagelist[i].page = 0;
       
  2403 			pagelist[i].size = 0;
       
  2404 		}
       
  2405 	}
       
  2406 
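       // The paged allocator below records its cells in one of two ways (inferred from
       // paged_allocate()/paged_free()): if rounding the request up to whole pages leaves
       // no spare room for a header, the page-aligned cell is tracked in the small
       // pagelist[] table; otherwise the committed size is stored in a header placed
       // cellalign bytes before the pointer returned to the caller.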
       
  2407 void* RNewAllocator::paged_allocate(unsigned size)
       
  2408 {
       
  2409 	unsigned nbytes = ceiling(size, pagesize);
       
  2410 	if (nbytes < size + cellalign)
       
  2411 	{	// not enough extra space for header and alignment, try and use cell list
       
  2412 		for (pagecell *c = pagelist,*e = c + npagecells;c < e;++c)
       
  2413 			if (c->page == 0)
       
  2414 			{
       
  2415 				void* p = map(0, nbytes);
       
  2416 				if (!p)
       
  2417 					return 0;
       
  2418 				c->page = p;
       
  2419 				c->size = nbytes;
       
  2420 				return p;
       
  2421 			}
       
  2422 	}
       
  2423 	// use a cell header
       
  2424 	nbytes = ceiling(size + cellalign, pagesize);
       
  2425 	void* p = map(0, nbytes);
       
  2426 	if (!p)
       
  2427 		return 0;
       
  2428 	*static_cast<unsigned*>(p) = nbytes;
       
  2429 	return offset(p, cellalign);
       
  2430 }
       
  2431 
       
  2432 void* RNewAllocator::paged_reallocate(void* p, unsigned size)
       
  2433 {
       
  2434 	if (lowbits(p, pagesize) == 0)
       
  2435 	{	// continue using descriptor
       
  2436 		pagecell* c = paged_descriptor(p);
       
  2437 		unsigned nbytes = ceiling(size, pagesize);
       
  2438 		void* newp = remap(p, c->size, nbytes);
       
  2439 		if (!newp)
       
  2440 			return 0;
       
  2441 		c->page = newp;
       
  2442 		c->size = nbytes;
       
  2443 		return newp;
       
  2444 	}
       
  2445 	else
       
  2446 	{	// use a cell header
       
  2447 		ASSERT(lowbits(p,pagesize) == cellalign);
       
  2448 		p = offset(p,-int(cellalign));
       
  2449 		unsigned nbytes = ceiling(size + cellalign, pagesize);
       
  2450 		unsigned obytes = *static_cast<unsigned*>(p);
       
  2451 		void* newp = remap(p, obytes, nbytes);
       
  2452 		if (!newp)
       
  2453 			return 0;
       
  2454 		*static_cast<unsigned*>(newp) = nbytes;
       
  2455 		return offset(newp, cellalign);
       
  2456 	}
       
  2457 }
       
  2458 
       
  2459 void RNewAllocator::paged_free(void* p)
       
  2460 {
       
  2461 	if (lowbits(p,pagesize) == 0)
       
  2462 	{	// check pagelist
       
  2463 		pagecell* c = paged_descriptor(p);
       
  2464 
       
  2465 		iTotalAllocSize -= c->size;		// TODO DAN
       
  2466 
       
  2467 		unmap(p, c->size);
       
  2468 		c->page = 0;
       
  2469 		c->size = 0;
       
  2470 	}
       
  2471 	else
       
  2472 	{	// check page header
       
  2473 		unsigned* page = static_cast<unsigned*>(offset(p,-int(cellalign)));
       
  2474 		unsigned size = *page;
       
  2475 		unmap(page,size);
       
  2476 	}
       
  2477 }
       
  2478 
       
  2479 pagecell* RNewAllocator::paged_descriptor(const void* p) const
       
  2480 {
       
  2481 	ASSERT(lowbits(p,pagesize) == 0);
       
  2482 	// Double casting to keep the compiler happy. Seems to think we are trying to
       
  2483 	// change a non-const member (pagelist) in a const function
       
  2484 	pagecell* c = (pagecell*)((void*)pagelist);
       
  2485 	pagecell* e = c + npagecells;
       
  2486 	for (;;)
       
  2487 	{
       
  2488 		ASSERT(c!=e);
       
  2489 		if (c->page == p)
       
  2490 			return c;
       
  2491 		++c;
       
  2492 	}
       
  2493 }
       
  2494 
       
  2495 RNewAllocator* RNewAllocator::FixedHeap(TAny* aBase, TInt aMaxLength, TInt aAlign, TBool aSingleThread)
       
  2496 /**
       
  2497 Creates a fixed length heap at a specified location.
       
  2498 
       
  2499 On successful return from this function, aMaxLength bytes are committed by the chunk.
       
  2500 The heap cannot be extended.
       
  2501 
       
  2502 @param aBase         A pointer to the location where the heap is to be constructed.
       
  2503 @param aMaxLength    The length of the heap. If the supplied value is less
       
  2504                      than KMinHeapSize, it is discarded and the value KMinHeapSize
       
  2505                      is used instead.
       
  2506 @param aAlign        The alignment of heap cells.
       
  2507 @param aSingleThread Indicates whether single threaded or not.
       
  2508 
       
  2509 @return A pointer to the new heap, or NULL if the heap could not be created.
       
  2510 
       
  2511 @panic USER 56 if aMaxLength is negative.
       
  2512 */
       
  2513 //
       
  2514 // Force construction of the fixed memory.
       
  2515 //
       
  2516 	{
       
  2517 
       
  2518 	__ASSERT_ALWAYS(aMaxLength>=0, ::Panic(ETHeapMaxLengthNegative));
       
  2519 	if (aMaxLength<KMinHeapSize)
       
  2520 		aMaxLength=KMinHeapSize;
       
  2521 
       
  2522 	RNewAllocator* h = new(aBase) RNewAllocator(aMaxLength, aAlign, aSingleThread);
       
  2523 
       
  2524 	if (!aSingleThread)
       
  2525 		{
       
  2526 		TInt r = h->iLock.CreateLocal();
       
  2527 		if (r!=KErrNone)
       
  2528 			return NULL;
       
  2529 		h->iHandles = (TInt*)&h->iLock;
       
  2530 		h->iHandleCount = 1;
       
  2531 		}
       
  2532 	return h;
       
  2533 	}
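       /* Usage sketch (illustrative only, not taken from this file): given a block of
          committed, writable memory at 'base', a fixed heap could be set up with
              RNewAllocator* heap = RNewAllocator::FixedHeap(base, 0x10000, 4, EFalse);
          after which cells are obtained and released through the usual RAllocator
          interface (Alloc()/Free()). */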
       
  2534 
       
  2535 RNewAllocator* RNewAllocator::ChunkHeap(const TDesC* aName, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread)
       
  2536 /**
       
  2537 Creates a heap in a local or global chunk.
       
  2538 
       
  2539 The chunk hosting the heap can be local or global.
       
  2540 
       
  2541 A local chunk is one which is private to the process creating it and is not
       
  2542 intended for access by other user processes.
       
  2543 A global chunk is one which is visible to all processes.
       
  2544 
       
  2545 The hosting chunk is local, if the pointer aName is NULL, otherwise
       
  2546 the hosting chunk is global and the descriptor *aName is assumed to contain
       
  2547 the name to be assigned to it.
       
  2548 
       
  2549 Ownership of the host chunk is vested in the current process.
       
  2550 
       
  2551 A minimum and a maximum size for the heap can be specified. On successful
       
  2552 return from this function, the size of the heap is at least aMinLength.
       
  2553 If subsequent requests for allocation of memory from the heap cannot be
       
  2554 satisfied by compressing the heap, the size of the heap is extended in
       
  2555 increments of aGrowBy until the request can be satisfied. Attempts to extend
       
  2556 the heap cause the size of the host chunk to be adjusted.
       
  2557 
       
  2558 Note that the size of the heap cannot be adjusted by more than aMaxLength.
       
  2559 
       
  2560 @param aName         If NULL, the function constructs a local chunk to host
       
  2561                      the heap.
       
  2562                      If not NULL, a pointer to a descriptor containing the name
       
  2563                      to be assigned to the global chunk hosting the heap.
       
  2564 @param aMinLength    The minimum length of the heap.
       
  2565 @param aMaxLength    The maximum length to which the heap can grow.
       
  2566                      If the supplied value is less than KMinHeapSize, then it
       
  2567                      is discarded and the value KMinHeapSize used instead.
       
  2568 @param aGrowBy       The increments to the size of the host chunk. If a value is
       
  2569                      not explicitly specified, the value KMinHeapGrowBy is taken
       
  2570                      by default
       
  2571 @param aAlign        The alignment of heap cells.
       
  2572 @param aSingleThread Indicates whether single threaded or not.
       
  2573 
       
  2574 @return A pointer to the new heap or NULL if the heap could not be created.
       
  2575 
       
  2576 @panic USER 41 if aMinLength is greater than the supplied value of aMaxLength.
       
  2577 @panic USER 55 if aMinLength is negative.
       
  2578 @panic USER 56 if aMaxLength is negative.
       
  2579 */
       
  2580 //
       
  2581 // Allocate a Chunk of the requested size and force construction.
       
  2582 //
       
  2583 	{
       
  2584 
       
  2585 	__ASSERT_ALWAYS(aMinLength>=0, ::Panic(ETHeapMinLengthNegative));
       
  2586 	__ASSERT_ALWAYS(aMaxLength>=aMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
       
  2587 	if (aMaxLength<KMinHeapSize)
       
  2588 		aMaxLength=KMinHeapSize;
       
  2589 	RChunk c;
       
  2590 	TInt r;
       
  2591 	if (aName)
       
  2592 		r = c.CreateDisconnectedGlobal(*aName, 0, 0, aMaxLength*2, aSingleThread ? EOwnerThread : EOwnerProcess);
       
  2593 	else
       
  2594 		r = c.CreateDisconnectedLocal(0, 0, aMaxLength*2, aSingleThread ? EOwnerThread : EOwnerProcess);
       
  2595 	if (r!=KErrNone)
       
  2596 		return NULL;
       
  2597 
       
  2598 	RNewAllocator* h = ChunkHeap(c, aMinLength, aGrowBy, aMaxLength, aAlign, aSingleThread, UserHeap::EChunkHeapDuplicate);
       
  2599 	c.Close();
       
  2600 	return h;
       
  2601 	}
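       /* Usage sketch (illustrative only, not taken from this file): a global chunk heap
          of up to 1MB could be created with
              _LIT(KMyHeap, "MyHeap");
              RNewAllocator* heap = RNewAllocator::ChunkHeap(&KMyHeap(), KMinHeapSize,
                                                             0x100000, KMinHeapGrowBy, 4, EFalse);
          passing NULL as aName would host the heap in a local (process-private) chunk instead. */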
       
  2602 
       
  2603 RNewAllocator* RNewAllocator::ChunkHeap(RChunk aChunk, TInt aMinLength, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
       
  2604 /**
       
  2605 Creates a heap in an existing chunk.
       
  2606 
       
  2607 This function is intended to be used to create a heap in a user writable code
       
  2608 chunk as created by a call to RChunk::CreateLocalCode().
       
  2609 This type of heap can be used to hold code fragments from a JIT compiler.
       
  2610 
       
  2611 The maximum length to which the heap can grow is the same as
       
  2612 the maximum size of the chunk.
       
  2613 
       
  2614 @param aChunk        The chunk that will host the heap.
       
  2615 @param aMinLength    The minimum length of the heap.
       
  2616 @param aGrowBy       The increments to the size of the host chunk.
       
  2617 @param aMaxLength    The maximum length to which the heap can grow.
       
  2618 @param aAlign        The alignment of heap cells.
       
  2619 @param aSingleThread Indicates whether single threaded or not.
       
  2620 @param aMode         Flags controlling the reallocation. The only bit which has any
       
  2621                      effect on reallocation is that defined by the enumeration
       
  2622                      ENeverMove of the enum RAllocator::TReAllocMode.
       
  2623                      If this is set, then any successful reallocation guarantees not
       
  2624                      to have changed the start address of the cell.
       
  2625                      By default, this parameter is zero.
       
  2626 
       
  2627 @return A pointer to the new heap or NULL if the heap could not be created.
       
  2628 */
       
  2629 //
       
  2630 // Construct a heap in an already existing chunk
       
  2631 //
       
  2632 	{
       
  2633 
       
  2634 	return OffsetChunkHeap(aChunk, aMinLength, 0, aGrowBy, aMaxLength, aAlign, aSingleThread, aMode);
       
  2635 	}
       
  2636 
       
  2637 RNewAllocator* RNewAllocator::OffsetChunkHeap(RChunk aChunk, TInt aMinLength, TInt aOffset, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
       
  2638 /**
       
  2639 Creates a heap in an existing chunk, offset from the beginning of the chunk.
       
  2640 
       
  2641 This function is intended to be used to create a heap where a fixed amount of
       
  2642 additional data must be stored at a known location. The additional data can be
       
  2643 placed at the base address of the chunk, allowing it to be located without
       
  2644 depending on the internals of the heap structure.
       
  2645 
       
  2646 The maximum length to which the heap can grow is the maximum size of the chunk,
       
  2647 minus the offset.
       
  2648 
       
  2649 @param aChunk        The chunk that will host the heap.
       
  2650 @param aMinLength    The minimum length of the heap.
       
  2651 @param aOffset       The offset from the start of the chunk to the start of the heap.
       
  2652 @param aGrowBy       The increments to the size of the host chunk.
       
  2653 @param aMaxLength    The maximum length to which the heap can grow.
       
  2654 @param aAlign        The alignment of heap cells.
       
  2655 @param aSingleThread Indicates whether the heap is single-threaded or not.
       
  2656 @param aMode         Flags controlling creation of the heap, a bitmask of values from
       
  2657                      UserHeap::TChunkHeapCreateMode. EChunkHeapSwitchTo switches the
       
  2658                      calling thread to the new heap, and EChunkHeapDuplicate duplicates
       
  2659                      the chunk handle so that the heap owns its own copy; see the
       
  2660                      construction code below for the exact behaviour.
       
  2661                      By default, this parameter is zero.
       
  2662 
       
  2663 @return A pointer to the new heap or NULL if the heap could not be created.
       
  2664 */
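       
       // Illustrative sketch (TSharedHeader and all sizes are assumptions): a fixed control
       // block at the base of a disconnected chunk, with the heap created at an offset above it.
       //
       //	struct TSharedHeader { TInt iVersion; TInt iCount; };	// hypothetical fixed data
       //	RChunk chunk;
       //	if (chunk.CreateDisconnectedLocal(0, 0x1000, 0x200000) == KErrNone)
       //		{
       //		TSharedHeader* hdr = (TSharedHeader*)chunk.Base();	// data at a known location
       //		hdr->iVersion = 1;
       //		RNewAllocator* heap = RNewAllocator::OffsetChunkHeap(chunk, 0, 0x1000, 0x1000,
       //				0x100000, 0, EFalse, UserHeap::EChunkHeapDuplicate);
       //		// ...
       //		chunk.Close();
       //		}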
       
  2665 //
       
  2666 // Construct a heap in an already existing chunk
       
  2667 //
       
  2668 	{
       
  2669 
       
  2670 	TInt page_size;
       
  2671 	GET_PAGE_SIZE(page_size);
       
  2672 	if (!aAlign)
       
  2673 		aAlign = RNewAllocator::ECellAlignment;
       
  2674 	TInt maxLength = aChunk.MaxSize();
       
  2675 	TInt round_up = Max(aAlign, page_size);
       
  2676 	TInt min_cell = _ALIGN_UP(Max((TInt)RNewAllocator::EAllocCellSize, (TInt)RNewAllocator::EFreeCellSize), aAlign);
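       	// min_cell: room for one minimal heap cell (the larger of the allocated- and free-cell
       	// sizes, rounded up to the cell alignment); used below when sizing the initial committed area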
       
  2677 	aOffset = _ALIGN_UP(aOffset, 8);
       
  2678 
       
  2679 #ifdef ALLOCATOR_ADP75
       
  2680 #ifdef TRACING_HEAPS
       
  2681 	TKName chunk_name;
       
  2682 	aChunk.FullName(chunk_name);
       
  2683 	BTraceContextBig(BTrace::ETest1, 0xF, 0xFF, chunk_name.Ptr(), chunk_name.Size());
       
  2684 
       
  2685 	TUint32 traceData[4];
       
  2686 	traceData[0] = aChunk.Handle();
       
  2687 	traceData[1] = aMinLength;
       
  2688 	traceData[2] = aMaxLength;
       
  2689 	traceData[3] = aAlign;
       
  2690 	BTraceContextN(BTrace::ETest1, 0xE, 0xEE, 0xEE, traceData, sizeof(traceData));
       
  2691 #endif
       
  2692 	// Reset aMinLength: not all memory is the same in the new allocator, so the requested minimum cannot be reserved properly
       
  2693 	if (aMinLength < aMaxLength)
       
  2694 		aMinLength = 0;
       
  2695 #endif
       
  2696 
       
  2697 	if (aMaxLength && aMaxLength+aOffset<maxLength)
       
  2698 		maxLength = _ALIGN_UP(aMaxLength+aOffset, round_up);
       
  2699 	__ASSERT_ALWAYS(aMinLength>=0, ::Panic(ETHeapMinLengthNegative));
       
  2700 	__ASSERT_ALWAYS(maxLength>=aMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
       
  2701 	aMinLength = _ALIGN_UP(Max(aMinLength, (TInt)sizeof(RNewAllocator) + min_cell) + aOffset, round_up);
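       	// i.e. the initial committed area must hold the RNewAllocator object itself plus at least
       	// one minimal cell, and the total (including the offset) is rounded up to round_up, the
       	// larger of the cell alignment and the page size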
       
  2702 
       
  2703 	// the new allocator uses a disconnected chunk, so the initial allocation must be committed
       
  2704 	// with Commit() instead of Adjust()
       
  2705 	//	TInt r=aChunk.Adjust(aMinLength);
       
  2706 	//TInt r = aChunk.Commit(aOffset, aMinLength);
       
  2707 
       
  2708 	aOffset = maxLength;
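       	// From here on aOffset is repurposed: the allocator bookkeeping is committed at offset
       	// maxLength, i.e. just above the nominal heap limit, which relies on the callers in this
       	// file reserving roughly twice the requested maximum for the chunk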
       
  2709 	//TInt MORE_CORE_OFFSET = maxLength/2;
       
  2710 	//TInt r = aChunk.Commit(MORE_CORE_OFFSET, aMinLength);
       
  2711 	TInt r = aChunk.Commit(aOffset, aMinLength);
       
  2712 
       
  2713 	if (r!=KErrNone)
       
  2714 		return NULL;
       
  2715 
       
  2716 	RNewAllocator* h = new (aChunk.Base() + aOffset) RNewAllocator(aChunk.Handle(), aOffset, aMinLength, maxLength, aGrowBy, aAlign, aSingleThread);
       
  2717 	//RNewAllocator* h = new (aChunk.Base() + MORE_CORE_OFFSET) RNewAllocator(aChunk.Handle(), aOffset, aMinLength, maxLength, aGrowBy, aAlign, aSingleThread);
       
  2718 
       
  2719 	TBool duplicateLock = EFalse;
       
  2720 	if (!aSingleThread)
       
  2721 		{
       
  2722 		duplicateLock = aMode & UserHeap::EChunkHeapSwitchTo;
       
  2723 		if(h->iLock.CreateLocal(duplicateLock ? EOwnerThread : EOwnerProcess)!=KErrNone)
       
  2724 			{
       
  2725 			h->iChunkHandle = 0;
       
  2726 			return NULL;
       
  2727 			}
       
  2728 		}
       
  2729 
       
  2730 	if (aMode & UserHeap::EChunkHeapSwitchTo)
       
  2731 		User::SwitchHeap(h);
       
  2732 
       
  2733 	h->iHandles = &h->iChunkHandle;
       
  2734 	if (!aSingleThread)
       
  2735 		{
       
  2736 		// now change the thread-relative chunk/semaphore handles into process-relative handles
       
  2737 		h->iHandleCount = 2;
       
  2738 		if(duplicateLock)
       
  2739 			{
       
  2740 			RHandleBase s = h->iLock;
       
  2741 			r = h->iLock.Duplicate(RThread());
       
  2742 			s.Close();
       
  2743 			}
       
  2744 		if (r==KErrNone && (aMode & UserHeap::EChunkHeapDuplicate))
       
  2745 			{
       
  2746 			r = ((RChunk*)&h->iChunkHandle)->Duplicate(RThread());
       
  2747 			if (r!=KErrNone)
       
  2748 				h->iLock.Close(), h->iChunkHandle=0;
       
  2749 			}
       
  2750 		}
       
  2751 	else
       
  2752 		{
       
  2753 		h->iHandleCount = 1;
       
  2754 		if (aMode & UserHeap::EChunkHeapDuplicate)
       
  2755 			r = ((RChunk*)&h->iChunkHandle)->Duplicate(RThread(), EOwnerThread);
       
  2756 		}
       
  2757 
       
  2758 	// return the heap address
       
  2759 	return (r==KErrNone) ? h : NULL;
       
  2760 	}
       
  2761 
       
  2762 
       
  2763 #define UserTestDebugMaskBit(bit) (TBool)(UserSvr::DebugMask(bit>>5) & (1<<(bit&31)))
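       // The debug mask is held as an array of 32-bit words: bit>>5 selects the word passed to
       // UserSvr::DebugMask() and 1<<(bit&31) the bit within it, so bit 96 (KUSERHEAPTRACE) is
       // bit 0 of word 3.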
       
  2764 
       
  2765 // Hack to get access to TChunkCreateInfo internals outside of the kernel
       
  2766 class TFakeChunkCreateInfo: public TChunkCreateInfo
       
  2767 	{
       
  2768 public:
       
  2769 	 void SetThreadNewAllocator(TInt aInitialSize, TInt aMaxSize, const TDesC& aName)
       
  2770 	 	{
       
  2771 		iType = TChunkCreate::ENormal | TChunkCreate::EDisconnected | TChunkCreate::EData;
       
  2772 		iMaxSize = aMaxSize * 2;
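       		// reserve twice the requested maximum address space, for the same reason as the
       		// aMaxLength*2 chunk reservation in ChunkHeap() above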
       
  2773 
       
  2774 	 	iInitialBottom = 0;
       
  2775 	 	iInitialTop = aInitialSize;
       
  2776 	 	iAttributes = TChunkCreate::ELocalNamed;
       
  2777 	 	iName = &aName;
       
  2778 	 	iOwnerType = EOwnerThread;
       
  2779 	 	}
       
  2780 	};
       
  2781 
       
  2782 _LIT(KLitDollarHeap,"$HEAP");
       
  2783 TInt RNewAllocator::CreateThreadHeap(SStdEpocThreadCreateInfo& aInfo, RNewAllocator*& aHeap, TInt aAlign, TBool aSingleThread)
       
  2784 /**
       
  2785 @internalComponent
       
  2786 */
       
  2787 //
       
  2788 // Create a user-side heap
       
  2789 //
       
  2790 	{
       
  2791 	TInt page_size;
       
  2792 	GET_PAGE_SIZE(page_size);
       
  2793 	TInt minLength = _ALIGN_UP(aInfo.iHeapInitialSize, page_size);
       
  2794 	TInt maxLength = Max(aInfo.iHeapMaxSize, minLength);
       
  2795 	if (UserTestDebugMaskBit(96)) // 96 == KUSERHEAPTRACE in nk_trace.h
       
  2796 		aInfo.iFlags |= ETraceHeapAllocs;
       
  2797 	// Create the thread's heap chunk.
       
  2798 	RChunk c;
       
  2799 	TFakeChunkCreateInfo createInfo;
       
  2800 	createInfo.SetThreadNewAllocator(0, maxLength, KLitDollarHeap());	// Initialise with no memory committed.
       
  2801 	TInt r = c.Create(createInfo);
       
  2802 	if (r!=KErrNone)
       
  2803 		return r;
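       	// EChunkHeapSwitchTo makes the creating thread switch to the new heap via
       	// User::SwitchHeap(); EChunkHeapDuplicate gives the heap its own duplicate of the
       	// chunk handle (see OffsetChunkHeap() above)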
       
  2804 	aHeap = ChunkHeap(c, minLength, page_size, maxLength, aAlign, aSingleThread, UserHeap::EChunkHeapSwitchTo|UserHeap::EChunkHeapDuplicate);
       
  2805 	c.Close();
       
  2806 	if (!aHeap)
       
  2807 		return KErrNoMemory;
       
  2808 	if (aInfo.iFlags & ETraceHeapAllocs)
       
  2809 		{
       
  2810 		aHeap->iFlags |= RAllocator::ETraceAllocs;
       
  2811 		BTraceContext8(BTrace::EHeap, BTrace::EHeapCreate,(TUint32)aHeap, RNewAllocator::EAllocCellSize);
       
  2812 		TInt handle = aHeap->ChunkHandle();
       
  2813 		TInt chunkId = ((RHandleBase&)handle).BTraceId();
       
  2814 		BTraceContext8(BTrace::EHeap, BTrace::EHeapChunkCreate, (TUint32)aHeap, chunkId);
       
  2815 		}
       
  2816 	return KErrNone;
       
  2817 	}
       
  2818 
       
  2819 TInt UserHeap::SetupThreadHeap(TBool, SStdEpocThreadCreateInfo& aInfo)
       
  2820 /**
       
  2821 @internalComponent
       
  2822 */
       
  2823     {
       
  2824     TInt r = KErrNone;
       
  2825     if (!aInfo.iAllocator && aInfo.iHeapInitialSize>0)
       
  2826         {
       
  2827         // new heap required
       
  2828         RNewAllocator* pH = NULL;
       
  2829         r = RNewAllocator::CreateThreadHeap(aInfo, pH);
       
  2830         }
       
  2831     else if (aInfo.iAllocator)
       
  2832         {
       
  2833         // sharing a heap
       
  2834         RAllocator* pA = aInfo.iAllocator;
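               // Open() increments the shared allocator's access count; User::SwitchAllocator()
               // then installs it as the current thread's allocator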
       
  2835         pA->Open();
       
  2836         User::SwitchAllocator(pA);
       
  2837         }
       
  2838     return r;
       
  2839     }
       
  2840 
       
  2841 #ifndef __WINS__
       
  2842 #pragma pop
       
  2843 #endif