kernel/eka/memmodel/epoc/flexible/mmu/mslaballoc.cpp
changeset 0 a41df078684a
equal deleted inserted replaced
-1:000000000000 0:a41df078684a
       
     1 // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
       
     2 // All rights reserved.
       
     3 // This component and the accompanying materials are made available
       
     4 // under the terms of the License "Eclipse Public License v1.0"
       
     5 // which accompanies this distribution, and is available
       
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     7 //
       
     8 // Initial Contributors:
       
     9 // Nokia Corporation - initial contribution.
       
    10 //
       
    11 // Contributors:
       
    12 //
       
    13 // Description:
       
    14 //
       
    15 
       
    16 #include <plat_priv.h>
       
    17 #include "mm.h"
       
    18 #include "mmu.h"
       
    19 
       
    20 #include "mslaballoc.h"
       
    21 
       
    22 
       
    23 
       
    24 //
       
    25 // RSlabAllocatorBase
       
    26 //
       
    27 
       
    28 RSlabAllocatorBase::RSlabAllocatorBase(TBool aDelayedCleanup)
       
    29 	:	iFreeCount(0), iReserveCount(0),
       
    30 		iSpinLock(TSpinLock::EOrderGenericIrqHigh3),
       
    31 		iDelayedCleanup(aDelayedCleanup), iSlabMap(0), iMemory(0), iMapping(0)
       
    32 	{
       
    33 	}
       
    34 
       
    35 
       
// Destroy the allocator, releasing the slab bitmap and any memory object and
// mapping created by Construct(). Members are null when Construct() was not
// called (or used the fixed aBase path), and the destroy functions tolerate that.
RSlabAllocatorBase::~RSlabAllocatorBase()
	{
	delete iSlabMap;
	// destroy the mapping before the memory object it maps...
	MM::MappingDestroy(iMapping);
	MM::MemoryDestroy(iMemory);
	}
       
    42 
       
    43 
       
// Second-phase construction; convenience overload with no fixed base address,
// so a memory object and mapping are created for the slabs (see 3-arg overload).
TInt RSlabAllocatorBase::Construct(TUint aMaxSlabs, TUint aObjectSize)
	{
	return Construct(aMaxSlabs,aObjectSize,0);
	}
       
    48 
       
    49 
       
// Second-phase construction.
//
// @param aMaxSlabs		Maximum number of page-sized slabs the allocator may use.
// @param aObjectSize	Size in bytes of each allocated object; must be a multiple
//						of a pointer and at least sizeof(SDblQueLink) so a free
//						object can hold its free-list link.
// @param aBase			If non-zero, a fixed virtual address at which the first
//						slab is expected to be mapped already; otherwise a memory
//						object and kernel mapping are created for the slabs.
//
// @return KErrNone on success, otherwise a system-wide error code. On failure
//         any partially-constructed state is released by the destructor.
TInt RSlabAllocatorBase::Construct(TUint aMaxSlabs, TUint aObjectSize, TLinAddr aBase)
	{
	TRACE2(("RSlabAllocatorBase::Construct(0x%08x,0x%08x,0x%08x)",aMaxSlabs,aObjectSize,aBase));

	// set sizes... (the TSlabHeader lives at the end of each page, so it is
	// excluded from the space available for objects)
	iObjectSize = aObjectSize;
	iObjectsPerSlab = ((KPageSize-sizeof(TSlabHeader))/aObjectSize);

	// sanity check arguments...
	__NK_ASSERT_DEBUG(iObjectsPerSlab);
	__NK_ASSERT_DEBUG(aObjectSize>=sizeof(SDblQueLink));
	__NK_ASSERT_DEBUG(aObjectSize%sizeof(TAny*)==0);

	// construct bitmap for slabs... (tracks which pages of the region are committed)
	iSlabMap = TBitMapAllocator::New(aMaxSlabs,ETrue);
	if(!iSlabMap)
		return KErrNoMemory;

	if(aBase)
		{
		// setup base address, we expect one slab to already be mapped at this address...
		iBase = aBase;

		// initialise first slab... (marked used in the bitmap; no iMemory/iMapping
		// is created on this path)
		iSlabMap->Alloc(0,1);
		InitSlab(iBase);
		}
	else
		{
		// construct memory object for slabs...
		__NK_ASSERT_DEBUG(!iMemory);
		TInt r = MM::MemoryNew(iMemory,EMemoryObjectUnpaged,aMaxSlabs);
		if(r!=KErrNone)
			return r;

		// construct memory mapping for slabs...
		r = MM::MappingNew(iMapping,iMemory,ESupervisorReadWrite,KKernelOsAsid);
		if(r!=KErrNone)
			return r;

		// setup base address...
		iBase = MM::MappingBase(iMapping);
		}

	// done...
	return KErrNone;
	}
       
    97 
       
    98 
       
// Allocate one object.
//
// @return Pointer to an object of iObjectSize bytes, or NULL if memory could
//         not be obtained for a new slab.
//
// Runs with iSpinLock held for the bulk of the function; NewSlab() temporarily
// relinquishes and re-acquires it internally. When the free count drops to the
// reserve threshold a new slab is allocated eagerly so that iReserveCount
// objects remain available.
TAny* RSlabAllocatorBase::Alloc()
	{
#ifdef _DEBUG
	// honour simulated OOM testing; RamAllocLock guards the check...
	RamAllocLock::Lock();
	TBool fail = K::CheckForSimulatedAllocFail();
	RamAllocLock::Unlock();
	if(fail)
		return 0;
#endif
	__SPIN_LOCK_IRQ(iSpinLock);

	// check if we need to allocate a new slab...
	// (NewSlab() drops and retakes iSpinLock; on failure we still hold it here)
	if(iFreeCount<=iReserveCount && !NewSlab())
		{
		__SPIN_UNLOCK_IRQ(iSpinLock);
		return 0;
		}

	// get a slab with unused objects... (first slab on the free list; the list
	// must be non-empty because iFreeCount is non-zero)
	TSlabHeader* slab = (TSlabHeader*)iFreeList.iA.iNext;
	__NK_ASSERT_DEBUG(slab!=(TSlabHeader*)&iFreeList.iA.iNext);
#ifdef _DEBUG
	CheckSlab(slab);
#endif

	// get object from slab...
	SDblQueLink* object = (SDblQueLink*)slab->iFreeList.iA.iNext;
	TRACE2(("RSlabAllocatorBase::Alloc got 0x%08x",object));
	object->Deque();
	__NK_ASSERT_DEBUG(slab->iAllocCount<iObjectsPerSlab);
	++slab->iAllocCount;
	--iFreeCount;

	// see if there are uninitialised free objects after the one just allocated...
	// (objects beyond the high water mark have never been linked into the free
	// list; they are added lazily, one per allocation, as the mark advances)
	if(slab->iHighWaterMark==object)
		{
		SDblQueLink* nextFree = (SDblQueLink*)((TLinAddr)object+iObjectSize);
		if((TAny*)((TLinAddr)nextFree+iObjectSize)<=slab) // next object must fit below the header
			{
			slab->iHighWaterMark = nextFree;
			slab->iFreeList.Add(nextFree);
			}
		}

	// if slab has no more free objects, remove it from the free list...
	if(slab->iFreeList.iA.iNext==&slab->iFreeList.iA)
		slab->Deque();

	__SPIN_UNLOCK_IRQ(iSpinLock);

	return object;
	}
       
   151 
       
   152 
       
// Free an object previously returned by Alloc().
//
// @param aObject	The object to free; NULL is allowed and ignored.
//
// Takes iSpinLock; note that on the inline-cleanup path FreeSlab() relinquishes
// the lock itself, so every branch of this function ends with the lock released.
void RSlabAllocatorBase::Free(TAny* aObject)
	{
	TRACE2(("RSlabAllocatorBase::Free(0x%08x)",aObject));

	if(!aObject)
		{
		// nothing to do
		return; 
		}

	__SPIN_LOCK_IRQ(iSpinLock);

	// check object address is valid...
	__NK_ASSERT_DEBUG((TLinAddr)aObject-iBase < iSlabMap->iSize*(TLinAddr)KPageSize); // in range
	__NK_ASSERT_DEBUG(((TLinAddr)aObject&KPageMask)%iObjectSize==0); // aligned correctly
	__NK_ASSERT_DEBUG(((TLinAddr)aObject&KPageMask)<iObjectSize*iObjectsPerSlab); // in slab

	// get slab for object... (the header occupies the last bytes of the page)
	TSlabHeader* slab = (TSlabHeader*)(((TLinAddr)aObject|KPageMask)+1)-1;
#ifdef _DEBUG
	CheckSlab(slab);
#endif

	// if slab didn't previously have any free objects, add it to the free list...
	if(slab->iFreeList.iA.iNext==&slab->iFreeList.iA)
		iFreeList.AddHead(slab);

	// add object to slab's free list... (a free object's storage is reused to
	// hold its SDblQueLink, which is why iObjectSize >= sizeof(SDblQueLink))
	slab->iFreeList.AddHead((SDblQueLink*)(TAny*)aObject);
	TUint allocCount = --slab->iAllocCount;
	__NK_ASSERT_DEBUG(allocCount<iObjectsPerSlab); // also catches underflow (wraps to large value)
	++iFreeCount;

	if(!allocCount)
		{
		// if slab is empty, put it on end of free list...
		slab->Deque();
		iFreeList.Add(slab);
		}
	else
		{
		// migrate slab to try and keep fuller slabs near the free list start...
		TSlabHeader* nextSlab = (TSlabHeader*)slab->iNext;
		if(nextSlab!=(SDblQueLink*)&iFreeList && allocCount<=nextSlab->iAllocCount)
			{
			slab->Deque();
			slab->InsertAfter(nextSlab);
			}
		}

#ifdef _DEBUG
	CheckSlab(slab);
#endif

	// check for spare empty slab... (empty slabs collect at the end of the list)
	TSlabHeader* lastSlab = (TSlabHeader*)iFreeList.iA.iPrev;
	if(lastSlab->iNext!=lastSlab->iPrev && lastSlab->iAllocCount==0) // not only slab and it's empty...
		{
		// free up slab...
		if(!iDelayedCleanup)
			{
			// free up slab now, (this also relinquishes iSpinLock)...
			FreeSlab(lastSlab);
			}
		else
			{
			// queue later cleanup... (Cleanup() will free empty slabs later,
			// outside this call)
			__SPIN_UNLOCK_IRQ(iSpinLock);
			iCleanup.Add(CleanupTrampoline,this);
			}
		}
	else
		{
		__SPIN_UNLOCK_IRQ(iSpinLock);
		}
	}
       
   229 
       
   230 
       
   231 #ifdef _DEBUG
       
   232 
       
// Debug-only consistency check for a slab: walks the slab's free list and
// verifies that every initialised-but-free object lies within the slab, at or
// below the high water mark, and at an object-size-aligned offset, and that the
// free-list length matches (objects below the mark) - (allocated objects).
// Must be called with iSpinLock held (callers in Alloc()/Free() hold it).
void RSlabAllocatorBase::CheckSlab(TSlabHeader* aSlab)
	{
//	Kern::Printf("CheckSlab %x %x %d",aSlab,aSlab->iHighWaterMark,aSlab->iAllocCount);
	TAny* base = (TAny*)((TLinAddr)aSlab&~KPageMask); // start of the slab's page
	SDblQueLink* o = aSlab->iFreeList.First();
	// number of objects at or below the high water mark (i.e. ever initialised)...
	TUint max = ((TLinAddr)aSlab->iHighWaterMark-(TLinAddr)base)/iObjectSize+1;
	__NK_ASSERT_DEBUG(aSlab->iAllocCount<=max);
	__NK_ASSERT_DEBUG(max<=iObjectsPerSlab);
	TUint freeCount = max-aSlab->iAllocCount;
	while(freeCount)
		{
//		Kern::Printf("CheckSlab o=%x",o);
		__NK_ASSERT_DEBUG(o>=base);
		__NK_ASSERT_DEBUG(o<=aSlab->iHighWaterMark);
		__NK_ASSERT_DEBUG((((TLinAddr)o-(TLinAddr)base)%iObjectSize)==0);
		o = o->iNext;
		--freeCount;
		}
	// after freeCount links we must be back at the list anchor...
	__NK_ASSERT_DEBUG(o==&aSlab->iFreeList.iA);
	}
       
   253 
       
   254 #endif
       
   255 
       
   256 
       
// Allocate and initialise a new slab so Alloc() can proceed.
//
// @return true if, on return, iFreeCount>iReserveCount (or we detected a
//         recursive call and must let the outer Alloc() draw from the reserve);
//         false if no slab could be obtained.
//
// Called and returns with iSpinLock held, but drops it around the MM calls
// (which may block) and holds the memory object's lock instead. iAllocatingSlab
// is protected by the memory lock and flags that this allocator is mid-slab-
// allocation: MM::MemoryAlloc() can presumably re-enter this allocator (hence
// the "gone recursive" path) — the inner call then succeeds immediately using
// the reserved objects instead of deadlocking on the memory lock.
//
// NOTE(review): this path dereferences iMemory, so it assumes the allocator was
// constructed without a fixed aBase, or that the reserve always suffices on the
// fixed-base path — confirm against callers.
TBool RSlabAllocatorBase::NewSlab()
	{
	TRACE2(("RSlabAllocatorBase::NewSlab()"));

	for(;;)
		{
		// swap the spinlock for the memory lock (never hold both while blocking)...
		__SPIN_UNLOCK_IRQ(iSpinLock);
		MM::MemoryLock(iMemory);

		if(iAllocatingSlab)
			{
			// we've gone recursive...
			__NK_ASSERT_DEBUG(iFreeCount); // check we still have some reserved objects

			// lie and pretend we've allocated a slab which will allow Alloc() to proceed...
			MM::MemoryUnlock(iMemory);
			__SPIN_LOCK_IRQ(iSpinLock);
			return true;
			}

		iAllocatingSlab = true;

		// still need new slab? (another thread may have added one while we
		// didn't hold the spinlock; iFreeCount is re-checked under the spinlock
		// at the bottom of the loop)
		if(iFreeCount<=iReserveCount)
			{
			// find unused slab...
			TInt i = iSlabMap->Alloc();
			if(i<0)
				break; // out of memory

			// commit memory for slab...
			TInt r = MM::MemoryAlloc(iMemory,i,1);
			if(r!=KErrNone)
				{
				iSlabMap->Free(i);
				break; // error
				}

			// initialise slab... (InitSlab takes the spinlock itself to publish it)
			TLinAddr page = iBase+(i<<KPageShift);
			InitSlab(page);
			TRACE2(("RSlabAllocatorBase::NewSlab() allocated 0x%08x",(TSlabHeader*)(page+KPageSize)-1));
			}

		iAllocatingSlab = false;

		MM::MemoryUnlock(iMemory);
		__SPIN_LOCK_IRQ(iSpinLock);

		// still need new slab?
		if(iFreeCount>iReserveCount)
			return true; // no, so finish
		}

	// failed... (breaks arrive here holding the memory lock with
	// iAllocatingSlab set; restore the invariant: spinlock held, flag clear)
	iAllocatingSlab = false;
	MM::MemoryUnlock(iMemory);
	__SPIN_LOCK_IRQ(iSpinLock);
	return false;
	}
       
   317 
       
   318 
       
   319 void RSlabAllocatorBase::InitSlab(TLinAddr aPage)
       
   320 	{
       
   321 	TRACE2(("RSlabAllocatorBase::InitSlab(0x%08x)",aPage));
       
   322 
       
   323 	// header goes at end of slab...
       
   324 	TSlabHeader* slab = (TSlabHeader*)(aPage+KPageSize)-1;
       
   325 
       
   326 	// link first object in slab onto the slab's free list...
       
   327 	SDblQueLink* head = &slab->iFreeList.iA;
       
   328 	SDblQueLink* first = (SDblQueLink*)aPage;
       
   329 	head->iNext = first;
       
   330 	head->iPrev = first;
       
   331 	first->iPrev = head;
       
   332 	first->iNext = head;
       
   333 
       
   334 	// setup rest of slab header...
       
   335 	slab->iAllocCount = 0;
       
   336 	slab->iHighWaterMark = first;
       
   337 
       
   338 	// put new slab at end of free slab list...
       
   339 	__SPIN_LOCK_IRQ(iSpinLock);
       
   340 	iFreeList.Add(slab);
       
   341 	iFreeCount += iObjectsPerSlab;
       
   342 	__SPIN_UNLOCK_IRQ(iSpinLock);
       
   343 	}
       
   344 
       
   345 
       
// Release an empty slab back to the memory object.
//
// @param aSlab		Header of the slab to free; must have iAllocCount==0.
//
// Called with iSpinLock held; RELINQUISHES the spinlock before the (potentially
// blocking) MM calls and does NOT re-acquire it — callers must not assume the
// lock is still held on return.
void RSlabAllocatorBase::FreeSlab(TSlabHeader* aSlab)
	{
	TRACE2(("RSlabAllocatorBase::FreeSlab(0x%08x)",aSlab));

	// unlink from the free-slab list and discount its objects while still locked...
	aSlab->Deque();
	iFreeCount -= iObjectsPerSlab;
	__SPIN_UNLOCK_IRQ(iSpinLock);

	// decommit the slab's page and mark it unused in the bitmap...
	MM::MemoryLock(iMemory);
	TUint i = ((TLinAddr)aSlab-iBase)>>KPageShift;
	MM::MemoryFree(iMemory,i,1);
	iSlabMap->Free(i);
	MM::MemoryUnlock(iMemory);
	}
       
   360 
       
   361 
       
   362 //
       
   363 // Cleanup
       
   364 //
       
   365 
       
   366 void RSlabAllocatorBase::CleanupTrampoline(TAny* aSelf)
       
   367 	{
       
   368 	((RSlabAllocatorBase*)aSelf)->Cleanup();
       
   369 	}
       
   370 
       
   371 
       
// Deferred cleanup (delayed-cleanup mode): free all spare empty slabs.
//
// Empty slabs accumulate at the end of the free-slab list (see Free()), so we
// pop from the tail until we hit a non-empty slab or only one slab remains.
// The spinlock is re-acquired each iteration because FreeSlab() relinquishes
// it; the break paths leave the loop with the lock held, released at the end.
void RSlabAllocatorBase::Cleanup()
	{
	// free any empty slabs...
	for(;;)
		{
		__SPIN_LOCK_IRQ(iSpinLock);
		TSlabHeader* slab = (TSlabHeader*)iFreeList.iA.iPrev; // get slab from end of list
		if(slab==iFreeList.iA.iNext)
			break; // only slab left, so leave it
		if(slab->iAllocCount!=0)
			break; // slab has allocated objects, so end, (empty slabs are always at end of list)
		FreeSlab(slab); // drops iSpinLock
		}
	__SPIN_UNLOCK_IRQ(iSpinLock);
	}