kernel/eka/memmodel/epoc/flexible/mshbuf.cpp
       
// Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32/memmodel/epoc/flexible/mshbuf.cpp
// Shareable Data Buffers

#include <memmodel.h>
#include "mmu/mm.h"
#include "mmboot.h"
#include <kernel/smap.h>

_LIT(KLitDMemModelAlignedShPool,"DMMAlignedShPool");	// Must be no more than 16 characters!

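// TWait implements a minimal wait queue for mapping transitions: a thread that
// finds a mapping mid-transition links a fast semaphore onto the mapping's
// iTransitions list and blocks; the thread completing the transition detaches
// the whole list and signals every waiter. The list is only ever manipulated
// with the pool lock held, so no further synchronisation is required.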
       
struct TWait
	{
	void Link(TWait*& aList)
		{
		iSem.SetOwner(NULL);
		iNext = aList;
		aList = this;
		}
	void Wait()
		{
		NKern::FSWait(&iSem);
		}
	NFastSemaphore iSem;
	TWait* iNext;

	static void SignalAll(TWait* aList)
		{
		while (aList)
			{
			TWait* next = aList->iNext;
			NKern::FSSignal(&aList->iSem);
			aList = next;
			}
		}
	};

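// Tracks one per-process virtual mapping of an aligned buffer: the memory
// mapping itself, the address space (os asid) it lives in, and the list of
// threads waiting for an in-progress map or unmap on it to complete.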
       
class DShBufMapping : public DBase
	{
public:
	SDblQueLink iObjLink;
	DMemoryMapping* iMapping;
	TInt iOsAsid;
	TWait* iTransitions; // Mapping and Unmapping operations
	TBool iTransitioning;
	};
       
DMemModelShPool::DMemModelShPool() : DShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::DMemModelShPool"));
	}

DMemModelShPool::~DMemModelShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::~DMemModelShPool"));
	}

void DMemModelShPool::DestroyClientResources(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::DestroyClientResources"));

	TInt r = DestroyAllMappingsAndReservedHandles(aProcess);
	__NK_ASSERT_DEBUG((r == KErrNone) || (r == KErrDied));
	(void)r;		// Silence warnings
	}

DMemModelAlignedShBuf::DMemModelAlignedShBuf(DShPool* aPool) : DShBuf(aPool)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::DMemModelAlignedShBuf()"));
	}

TInt DMemModelAlignedShBuf::Construct()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Construct()"));

	TInt r = DShBuf::Construct();

	if (r == KErrNone)
		r = Create();

	return r;
	}

TInt DMemModelAlignedShBuf::Close(TAny* aPtr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Close(0x%08x)", aPtr));

	if (aPtr)
		{
		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);
		UnMap(pP);
		iPool->CloseClient(pP);
		}

	return DShBuf::Close(aPtr);
	}

TInt DMemModelAlignedShBuf::AddToProcess(DProcess* aProcess, TUint aAttr)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Adding DMemModelShBuf %O to process %O",this,aProcess));
	TInt r;
	TLinAddr base;
	TUint flags;

	r = iPool->OpenClient(aProcess, flags);

	if (r == KErrNone)
		{
		if ((flags & EShPoolAutoMapBuf) && ((aAttr & EShPoolNoMapBuf) == 0))
			{
			// note we use the client's pool flags and not the buffer attributes
			r = Map(flags, aProcess, base);

			if (aProcess == K::TheKernelProcess)
				iRelAddress = static_cast<TLinAddr>(base);
			}
		}

	return r;
	}

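// Each aligned buffer owns its own memory object. The memory is wiped with a
// custom byte (0xAA) when created; any guard pages fall inside iBufGap, while
// only iBufSize bytes (rounded up to whole pages) are actually committed.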
       
TInt DMemModelAlignedShBuf::Create()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Create()"));
	TInt r = KErrNone;

	// calculate memory type...
	TMemoryObjectType memoryType = EMemoryObjectUnpaged;

	TMemoryAttributes attr = EMemoryAttributeStandard;

	// calculate memory flags...
	TMemoryCreateFlags flags = static_cast<TMemoryCreateFlags>(EMemoryCreateDefault|EMemoryCreateUseCustomWipeByte|(0xAA<<EMemoryCreateWipeByteShift));

	// note that any guard pages will be included in iBufGap, however the amount of memory committed
	// will be iBufSize rounded up to a page
	r = MM::MemoryNew(iMemoryObject, memoryType, MM::RoundToPageCount(iPool->iBufGap), flags, attr);

	if (r != KErrNone)
		return r;

	if (iPool->iPoolFlags & EShPoolContiguous)
		{
		TPhysAddr paddr;
		r = MM::MemoryAllocContiguous(iMemoryObject, 0, MM::RoundToPageCount(iPool->iBufSize), 0, paddr);
		}
	else
		{
		r = MM::MemoryAlloc(iMemoryObject, 0, MM::RoundToPageCount(iPool->iBufSize));
		}

	return r;
	}
       
DMemModelAlignedShBuf::~DMemModelAlignedShBuf()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::~DMemModelAlignedShBuf()"));

	__NK_ASSERT_DEBUG(iMappings.IsEmpty());

	MM::MemoryDestroy(iMemoryObject);
	}

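// Maps this buffer into aProcess's address space. If another thread is already
// mapping or unmapping the buffer in that process, we queue on the mapping's
// transition list and retry once woken; if a stable mapping already exists the
// call fails with KErrAlreadyExists. Note the retry loop exits with the pool
// lock still held. The actual MM::MappingMap call is made with the pool lock
// released, with iTransitioning set so that concurrent callers wait.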
       
TInt DMemModelAlignedShBuf::Map(TUint aMapAttr, DProcess* aProcess, TLinAddr& aBase)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Map()"));
	TInt r = KErrNone;

	DShBufMapping* m = NULL;
	DMemoryMapping* mapping = NULL;
	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);

	TBool write = (TBool)EFalse;

	// User access unless mapping for the kernel process; writeable only if the
	// pool is marked writeable; never executable
	if (aMapAttr & EShPoolWriteable)
		write = (TBool)ETrue;

	TMappingPermissions perm = MM::MappingPermissions(pP != K::TheKernelProcess, write, (TBool)EFalse);
	TWait wait;

	for(;;)
		{
		iPool->LockPool();
		r = FindMapping(m, pP);

		if (r != KErrNone)
			break;

		if (m->iTransitioning)
			{
			wait.Link(m->iTransitions);
			iPool->UnlockPool();
			wait.Wait();
			}
		else
			{
			iPool->UnlockPool();
			return KErrAlreadyExists;
			}
		}

	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iPool->iClientMap->Find(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client);

	DMemModelAlignedShPool* pool = reinterpret_cast<DMemModelAlignedShPool*>(iPool);

	__NK_ASSERT_DEBUG(m == NULL);
	r = pool->GetFreeMapping(m, client);

	if (r == KErrNone)
		{
		iMappings.AddHead(&m->iObjLink);
		m->iTransitioning = ETrue;

		mapping = m->iMapping;
		iPool->UnlockPool(); // have to release fast lock for MappingMap

		r = MM::MappingMap(mapping, perm, iMemoryObject, 0, MM::RoundToPageCount(pool->iBufSize));

		iPool->LockPool();

		TWait* list = m->iTransitions;
		m->iTransitions = NULL;
		m->iTransitioning = EFalse;

		if (r != KErrNone)
			{
			// Mapping failed: unhook the mapping from this buffer's list again
			// before handing it back (ReleaseMapping nulls m, so it must not be
			// touched afterwards)
			m->iObjLink.Deque();
			pool->ReleaseMapping(m, client);
			}
		else
			aBase = MM::MappingBase(mapping);

		iPool->UnlockPool();

		TWait::SignalAll(list);
		}
	else
		iPool->UnlockPool();

	return r;
	}

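// Searches this buffer's mapping list for one belonging to aProcess's address
// space. The pool lock must be held; a reference is taken on the process's os
// asid while walking the list so the asid cannot be freed and reused under us.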
       
TInt DMemModelAlignedShBuf::FindMapping(DShBufMapping*& aMapping, DMemModelProcess* aProcess)
	{
	// Must be in critical section so we don't leak os asid references.
	__ASSERT_CRITICAL;
	__NK_ASSERT_DEBUG(iPool->iLock.HeldByCurrentThread());

	TInt r = KErrNotFound;
	aMapping = NULL;

	// Open a reference on aProcess's os asid so that it can't be freed and
	// reused while searching.
	TInt osAsid = aProcess->TryOpenOsAsid();
	if (osAsid < 0)
		{// aProcess has died and freed its os asid.
		return KErrDied;
		}

	SDblQueLink* pLink = iMappings.First();
	SDblQueLink* end = reinterpret_cast<SDblQueLink*>(&iMappings);
	DShBufMapping* m = NULL;

	while (pLink != end)
		{
		m = _LOFF(pLink, DShBufMapping, iObjLink);

		if (m->iOsAsid == osAsid)
			{
			aMapping = m;
			r = KErrNone;
			break;
			}
		pLink = pLink->iNext;
		}

	// Close the reference on the os asid as if we have a mapping then its lifetime will
	// determine whether the process still owns an os asid.
	aProcess->CloseOsAsid();
	return r;
	}

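// Reverses Map(): waits for any in-flight transition on the process's mapping
// to finish, marks the mapping as transitioning, unmaps with the pool lock
// released, then returns the mapping to the client's free list and wakes any
// threads that queued up behind the unmap.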
       
TInt DMemModelAlignedShBuf::UnMap(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::UnMap()"));

	TInt r = KErrNone;

	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);

	DShBufMapping* m = NULL;
	TWait wait;

	for(;;)
		{
		iPool->LockPool();
		r = FindMapping(m, pP);

		if (r != KErrNone)
			{
			iPool->UnlockPool();
			return KErrNotFound;
			}

		if (m->iTransitioning)
			{
			wait.Link(m->iTransitions);
			iPool->UnlockPool();
			wait.Wait();
			}
		else
			{
			break;
			}
		}

	m->iTransitioning = ETrue;
	iPool->UnlockPool();

	MM::MappingUnmap(m->iMapping);

	iPool->LockPool();
	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iPool->iClientMap->Find(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client);

	TWait* list = m->iTransitions;
	m->iTransitions = NULL;
	m->iObjLink.Deque();
	m->iTransitioning = EFalse;

	DMemModelAlignedShPool* pool = reinterpret_cast<DMemModelAlignedShPool*>(iPool);
	pool->ReleaseMapping(m, client);

	if (aProcess == K::TheKernelProcess)
		iRelAddress = 0;

	iPool->UnlockPool();

	TWait::SignalAll(list);
	return KErrNone;
	}
       
TUint8* DMemModelAlignedShBuf::Base(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Base()"));
	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);

	DShBufMapping* mapping = NULL;
	iPool->LockPool();
	TInt r = FindMapping(mapping, pP);
	TUint8* base = NULL;

	if (r == KErrNone)
		base = reinterpret_cast<TUint8*>(MM::MappingBase(mapping->iMapping));
	iPool->UnlockPool();

	return base;
	}

TUint8* DMemModelAlignedShBuf::Base()
	{
	return reinterpret_cast<TUint8*>(iRelAddress);
	}

TInt DMemModelAlignedShBuf::Pin(TPhysicalPinObject* aPinObject, TBool aReadOnly, TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DMemModelAlignedShBuf::Pin");

	TInt r = MM::PinPhysicalMemory(iMemoryObject, (DPhysicalPinMapping*)aPinObject, 0,
								   MM::RoundToPageCount(Size()),
								   aReadOnly, aAddress, aPages, aMapAttr, aColour);

	return r;
	}

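// Mappings are recycled rather than destroyed: GetFreeMapping hands out a
// pre-constructed mapping from the client's free list and ReleaseMapping puts
// one back. If the client has already gone (pool closed), ReleaseMapping
// destroys the mapping instead, dropping the pool lock around the destroy.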
       
TInt DMemModelAlignedShPool::GetFreeMapping(DShBufMapping*& aMapping, DMemModelAlignedShPoolClient* aClient)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::GetFreeMapping()"));
	__NK_ASSERT_DEBUG(iLock.HeldByCurrentThread());

	TInt r = KErrNotFound;
	aMapping = NULL;

	if (aClient)
		{
		if (!aClient->iMappingFreeList.IsEmpty())
			{
			aMapping = _LOFF(aClient->iMappingFreeList.GetFirst(), DShBufMapping, iObjLink);
			r = KErrNone;
			}
		else
			{
			r = KErrNoMemory;
			}
		}

	__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::GetFreeMapping(0x%08x, 0x%08x) returns %d", aMapping, aClient, r));
	return r;
	}

TInt DMemModelAlignedShPool::ReleaseMapping(DShBufMapping*& aMapping, DMemModelAlignedShPoolClient* aClient)
	{
	__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::ReleaseMapping(0x%08x,0x%08x)",aMapping,aClient));
	__NK_ASSERT_DEBUG(iLock.HeldByCurrentThread());

	TInt r = KErrNone;

	if (aClient)
		{
		aClient->iMappingFreeList.AddHead(&aMapping->iObjLink);
		aMapping = NULL;
		}
	else
		{
		// the pool has probably been closed, so delete the mapping
		r = KErrNotFound;
		__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::ReleaseMapping delete 0x%08x",aMapping));
		UnlockPool(); // have to release fast lock for MappingDestroy
		MM::MappingDestroy(aMapping->iMapping);
		delete aMapping;
		aMapping = NULL;
		LockPool();
		}

	return r;
	}

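// A client's "buffer window" fixes up front how many buffers it can have
// mapped simultaneously: the mappings (and reserved handles) are created now
// and the window cannot be changed later. A negative aWindowSize means "every
// buffer in the pool"; the window can only be set once per client.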
       
TInt DMemModelAlignedShPool::SetBufferWindow(DProcess* aProcess, TInt aWindowSize)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::SetBufferWindow()"));

	// Create and construct mappings but do not map them;
	// also allocate reserved handles
	TInt r = KErrNone;
	TUint noOfBuffers = aWindowSize;

	if (aWindowSize > static_cast<TInt>(iMaxBuffers))
		return KErrArgument;

	Kern::MutexWait(*iProcessLock);

	LockPool();
	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
	UnlockPool();

	if (client)
		{
		if (client->iWindowSize != 0)
			{
			Kern::MutexSignal(*iProcessLock);
			return KErrAlreadyExists;
			}

		if (aWindowSize < 0)
			{
			noOfBuffers = iTotalBuffers;
			}

		DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
		r = CreateMappings(client, noOfBuffers, pP);

		if (r == KErrNone)
			{
			client->iWindowSize = aWindowSize;
			}
		else
			{
			DestroyMappings(client, noOfBuffers);
			}
		}
	else
		{
		r = KErrNotFound;
		}

	Kern::MutexSignal(*iProcessLock);

	return r;
	}

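// Constructs a new (not yet mapped) DShBufMapping for aProcess, reserving all
// mapping resources up front so that mapping a buffer later cannot fail for
// want of memory. Holds a reference on the process's os asid for the duration.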
       
TInt DMemModelAlignedShPool::MappingNew(DShBufMapping*& aMapping, DMemModelProcess* aProcess)
	{
	// Must be in critical section so we don't leak os asid references.
	__ASSERT_CRITICAL;

	TMappingCreateFlags flags = EMappingCreateDefault;

	FlagSet(flags, EMappingCreateReserveAllResources);

	// Open a reference to aProcess's os asid so it isn't freed and reused while
	// we're creating this mapping.
	TInt osAsid = aProcess->TryOpenOsAsid();
	if (osAsid < 0)
		{// The process has freed its os asid so can't create a new mapping.
		return KErrDied;
		}

	DMemoryMapping* mapping = NULL;
	DShBufMapping* m = NULL;
	TInt r = MM::MappingNew(mapping, MM::RoundToPageCount(iBufGap), osAsid, flags);

	if (r == KErrNone)
		{
		m = new DShBufMapping;

		if (m)
			{
			m->iMapping = mapping;
			m->iOsAsid = osAsid;
			}
		else
			{
			MM::MappingDestroy(mapping);
			r = KErrNoMemory;
			}
		}

	// Close the reference on the os asid as while aMapping is valid then the
	// os asid must be also.
	aProcess->CloseOsAsid();

	aMapping = m;
	__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::MappingNew returns 0x%08x,%d",aMapping,r));
	return r;
	}
       
TInt DMemModelAlignedShPool::AddToProcess(DProcess* aProcess, TUint aAttr)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Adding DMemModelAlignedShPool %O to process %O",this,aProcess));
	TInt r = KErrNone;

	Kern::MutexWait(*iProcessLock);

	LockPool();
	DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
	UnlockPool();

	if (!client)
		{
		client = new DMemModelAlignedShPoolClient;
		if (client)
			{
			client->iFlags = aAttr;
			r = iClientMap->Add(reinterpret_cast<TUint>(aProcess), client);

			if (r == KErrNone)
				{
				if (aProcess != K::TheKernelProcess)
					{
					r = aProcess->iHandles.Reserve(iTotalBuffers);

					if (r != KErrNone)
						{
						iClientMap->Remove(reinterpret_cast<TUint>(aProcess));
						}
					}
				}
			if (r != KErrNone)
				{
				delete client;
				r = KErrNoMemory;
				}
			}
		else
			{
			r = KErrNoMemory;
			}
		}
	else
		{
		LockPool();
		client->iAccessCount++;
		UnlockPool();
		}

	Kern::MutexSignal(*iProcessLock);

	return r;
	}
       
DMemModelAlignedShPool::DMemModelAlignedShPool() : DMemModelShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DMemModelAlignedShPool"));
	}

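// Returns a buffer to the pool. A buffer can only be wiped and put back on the
// free list once no process has it mapped; otherwise it is parked on
// iPendingList and picked up later by UpdateFreeList() from the management
// DFC. Initial buffers are freed to the head of the list and grown buffers to
// the tail, which keeps shrinking cheap.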
       
void DMemModelAlignedShPool::Free(DShBuf* aBuf)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::Free (aBuf = 0x%08x)", aBuf));

	LockPool();
#ifdef _DEBUG
	// Remove from allocated list
	aBuf->iObjLink.Deque();
#endif

	DMemModelAlignedShBuf* buf = reinterpret_cast<DMemModelAlignedShBuf*>(aBuf);

	if (MM::MemoryIsNotMapped(buf->iMemoryObject))
		{
		UnlockPool(); // have to release fast mutex
		MM::MemoryWipe(buf->iMemoryObject);
		LockPool();

		// we want to put the initial buffers at the head of the free list
		// and the grown buffers at the tail as this makes shrinking more efficient
		if (aBuf >= iInitialBuffersArray && aBuf < (iInitialBuffersArray + iInitialBuffers))
			{
			iFreeList.AddHead(&aBuf->iObjLink);
			}
		else
			{
			iFreeList.Add(&aBuf->iObjLink);
			}
		++iFreeBuffers;
#ifdef _DEBUG
		--iAllocatedBuffers;
#endif
		}
	else
		{
		iPendingList.Add(&aBuf->iObjLink);
		}

	iPoolFlags &= ~EShPoolSuppressShrink;		// Allow shrinking again, if it was blocked
	UnlockPool();

	// queue ManagementDfc which completes notifications as appropriate
	if (HaveWorkToDo())
		KickManagementDfc();

	DShPool::Close(NULL); // decrement pool reference count
	}
       
TInt DMemModelAlignedShPool::UpdateFreeList()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::UpdateFreeList"));

	LockPool();
	SDblQueLink* pLink = iPendingList.First();
	UnlockPool();

	SDblQueLink* anchor = &iPendingList.iA;

	while (pLink != anchor)
		{
		DMemModelAlignedShBuf* buf = _LOFF(pLink, DMemModelAlignedShBuf, iObjLink);
		LockPool();
		pLink = pLink->iNext;
		UnlockPool();

		if (MM::MemoryIsNotMapped(buf->iMemoryObject))
			{
			LockPool();
			buf->iObjLink.Deque();
			UnlockPool();

			MM::MemoryWipe(buf->iMemoryObject);

			LockPool();
			if (buf >= iInitialBuffersArray && buf < (iInitialBuffersArray + iInitialBuffers))
				{
				iFreeList.AddHead(&buf->iObjLink);
				}
			else
				{
				iFreeList.Add(&buf->iObjLink);
				}
			++iFreeBuffers;
#ifdef _DEBUG
			--iAllocatedBuffers;
#endif
			UnlockPool();
			}
		}

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::UpdateFreeList"));
	return KErrNone;
	}
       
DMemModelAlignedShPool::~DMemModelAlignedShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::~DMemModelAlignedShPool"));
	}

TInt DMemModelAlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
	{
	TUint64 maxSize64 = static_cast<TUint64>(aInfo.iInfo.iMaxBufs) * static_cast<TUint64>(iBufGap);

	if (maxSize64 > static_cast<TUint64>(KMaxTInt) || maxSize64 == 0)
		return KErrArgument;

	iMaxPages = MM::RoundToPageCount(static_cast<TInt>(maxSize64));

	return KErrNone;
	}
       
TInt DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));

	TInt r = KErrNone;
	Kern::MutexWait(*iProcessLock);
	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iClientMap->Remove(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client);
	__NK_ASSERT_DEBUG(client->iAccessCount == 0);

	DestroyMappings(client, KMaxTInt);
	delete client;

	if (aProcess != K::TheKernelProcess)
		{
		// Remove reserved handles
		r = aProcess->iHandles.Reserve(-iTotalBuffers);
		}

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));

	return r;
	}
       
TInt DMemModelAlignedShPool::DestroyMappings(DMemModelAlignedShPoolClient* aClient, TInt aNoOfMappings)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DestroyMappings(0x%08x)", aClient));

	TInt r = KErrNone;
	TInt i = 0;

	DShBufMapping* m = NULL;
	SDblQueLink* pLink = NULL;

	while (i < aNoOfMappings && !aClient->iMappingFreeList.IsEmpty())
		{
		LockPool();
		pLink = aClient->iMappingFreeList.GetFirst();
		UnlockPool();

		if (pLink == NULL)
			break;

		m = _LOFF(pLink, DShBufMapping, iObjLink);
		__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::DestroyMappings delete 0x%08x",m));
		MM::MappingClose(m->iMapping);
		delete m;
		++i;
		}

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::DestroyMappings"));

	return r;
	}
       
TInt DMemModelAlignedShPool::CreateMappings(DMemModelAlignedShPoolClient* aClient, TInt aNoOfMappings, DMemModelProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::CreateMappings"));

	__ASSERT_MUTEX(iProcessLock);

	TInt r = KErrNone;

	for (TInt i = 0; i < aNoOfMappings; ++i)
		{
		DShBufMapping* mapping;
		r = MappingNew(mapping, aProcess);
		if (r == KErrNone)
			{
			LockPool();
			aClient->iMappingFreeList.AddHead(&mapping->iObjLink);
			UnlockPool();
			}
		else
			{
			r = KErrNoMemory;
			break;
			}
		}

	return r;
	}

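// Called when the pool grows or shrinks by aNoOfBuffers (positive or
// negative). For every client process this adjusts the handle reservation
// and, for clients without a fixed buffer window, creates or destroys
// per-buffer mappings. If growing fails part-way, the iterator is reset and
// the second loop walks only the entries that were already processed,
// undoing the reservations made so far.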
       
TInt DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(TInt aNoOfBuffers)
	{
	__KTRACE_OPT(KMMU2, Kern::Printf(">DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x)", aNoOfBuffers));

	SMap::TIterator iter(*iClientMap);
	SMap::TEntry* entry;
	SMap::TEntry* lastEntry = NULL;
	DMemModelProcess* pP;
	DMemModelAlignedShPoolClient* client;
	TInt result = KErrNone;

	Kern::MutexWait(*iProcessLock);

	// First handle the case of increasing allocation
	if (aNoOfBuffers > 0)
		while ((entry = iter.Next()) != lastEntry)
			{
			// Try to update handle reservation; skip if process is null or has gone away
			client = (DMemModelAlignedShPoolClient*)(entry->iObj);
			pP = (DMemModelProcess*)(entry->iKey);
			if (!pP)
				continue;
			TInt r = pP->iHandles.Reserve(aNoOfBuffers);
			if (r)
				__KTRACE_OPT(KMMU2, Kern::Printf("?DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x) Reserve failed %d", aNoOfBuffers, r));
			if (r == KErrDied)
				continue;

			if (r == KErrNone && client->iWindowSize <= 0)
				{
				// A positive window size means the number of mappings is fixed, so we don't need to reserve more.
				// But here zero or negative means a variable number, so we need to create extra mappings now.
				r = CreateMappings(client, aNoOfBuffers, pP);
				if (r != KErrNone)
					{
					__KTRACE_OPT(KMMU2, Kern::Printf("?DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x) CreateMappings failed %d", aNoOfBuffers, r));
					pP->iHandles.Reserve(-aNoOfBuffers); // Creation failed, so release the handles reserved above
					}
				}

			if (r != KErrNone)
				{
				// Some problem; clean up as best we can by falling into the loop below to undo what we've done
				result = r;
				iter.Reset();
				lastEntry = entry;
				aNoOfBuffers = -aNoOfBuffers;
				break;
				}
			}

	// Now handle the case of decreasing allocation; also used for recovery from errors, in which case
	// this loop iterates only over the elements that were *successfully* processed by the loop above
	if (aNoOfBuffers < 0)
		while ((entry = iter.Next()) != lastEntry)
			{
			// Try to update handle reservation; skip if process is null or has gone away
			client = (DMemModelAlignedShPoolClient*)(entry->iObj);
			pP = (DMemModelProcess*)(entry->iKey);
			if (!pP)
				continue;
			TInt r = pP->iHandles.Reserve(aNoOfBuffers);
			if (r == KErrDied)
				continue;

			if (r == KErrNone && client->iWindowSize <= 0)
				r = DestroyMappings(client, -aNoOfBuffers);
			// De-allocation by Reserve(-n) and/or DestroyMappings() should never fail
			if (r != KErrNone)
				Kern::PanicCurrentThread(KLitDMemModelAlignedShPool, r);
			}

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU2, Kern::Printf("<DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x) returning %d", aNoOfBuffers, result));
	return result;
	}
       
TInt DMemModelAlignedShPool::DeleteInitialBuffers()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DeleteInitialBuffers"));

	if (iInitialBuffersArray != NULL)
		{
		for (TUint i = 0; i < iInitialBuffers; i++)
			{
			iInitialBuffersArray[i].iObjLink.Deque(); // remove from free list
			iInitialBuffersArray[i].Dec();
			iInitialBuffersArray[i].~DMemModelAlignedShBuf();
			}
		}

	Kern::Free(iInitialBuffersArray);
	iInitialBuffersArray = NULL;

	return KErrNone;
	}
       
TInt DMemModelAlignedShPool::Close(TAny* aPtr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::Close(0x%08x)", aPtr));

	if (aPtr)
		{
		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);

		CloseClient(pP);
		}
	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::Close(0x%08x)", aPtr));
	return DShPool::Close(aPtr);
	}
       
TInt DMemModelAlignedShPool::CreateInitialBuffers()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::CreateInitialBuffers"));

	iInitialBuffersArray = reinterpret_cast<DMemModelAlignedShBuf*>(Kern::Alloc(iInitialBuffers * sizeof(DMemModelAlignedShBuf)));

	if (iInitialBuffersArray == NULL)
		return KErrNoMemory;

	for (TUint i = 0; i < iInitialBuffers; i++)
		{
		// always use kernel linear address in DShBuf
		DMemModelAlignedShBuf *buf = new (&iInitialBuffersArray[i]) DMemModelAlignedShBuf(this);
		TInt r = buf->Construct();

		if (r == KErrNone)
			{
			iFreeList.Add(&buf->iObjLink);
			}
		else
			{
			iInitialBuffers = i;
			return KErrNoMemory;
			}
		}

	iFreeBuffers  = iInitialBuffers;
	iTotalBuffers = iInitialBuffers;
	return KErrNone;
	}

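// Grows the pool by iGrowByRatio (apparently a 24.8 fixed-point fraction of
// the current total, hence mult_fx248), clamped to the pool's headroom. New
// buffers are built on a local queue first; only if every client's mappings
// and handle reservations can be updated are they spliced onto the free list,
// otherwise they are all deleted again.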
       
TInt DMemModelAlignedShPool::GrowPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::GrowPool()"));
	TInt r = KErrNone;
	SDblQue temp;

	Kern::MutexWait(*iProcessLock);

	TUint32 headroom = iMaxBuffers - iTotalBuffers;

	// How many buffers to grow by?
	TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
	if (grow == 0)			// Handle round-to-zero
		grow = 1;
	if (grow > headroom)
		grow = headroom;

	TUint i;
	for (i = 0; i < grow; ++i)
		{
		DMemModelAlignedShBuf *buf = new DMemModelAlignedShBuf(this);

		if (buf == NULL)
			{
			r = KErrNoMemory;
			break;
			}

		r = buf->Construct();

		if (r != KErrNone)
			{
			buf->DObject::Close(NULL);
			break;
			}

		temp.Add(&buf->iObjLink);
		}

	r = UpdateMappingsAndReservedHandles(i);

	if (r == KErrNone)
		{
		LockPool();
		iFreeList.MoveFrom(&temp);
		iFreeBuffers += i;
		iTotalBuffers += i;
		UnlockPool();
		}
	else
		{
		// couldn't create either the mappings or reserve handles so have no choice but to
		// delete the buffers
		SDblQueLink *pLink;
		while ((pLink = temp.GetFirst()) != NULL)
			{
			DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
			buf->DObject::Close(NULL);
			}
		}

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::GrowPool()"));
	return r;
	}

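// Shrinks the pool by iShrinkByRatio, but never below the initial buffers and
// never by more than is currently free. Grown buffers live at the tail of the
// free list, so the scan works backwards from the tail; if fewer buffers than
// requested could be reclaimed, further shrinking is suppressed until the
// next Free().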
       
TInt DMemModelAlignedShPool::ShrinkPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::ShrinkPool()"));

	Kern::MutexWait(*iProcessLock);

	TUint32 grownBy = iTotalBuffers - iInitialBuffers;

	// How many buffers to shrink by?
	TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
	if (shrink == 0)		// Handle round-to-zero
		shrink = 1;
	if (shrink > grownBy)
		shrink = grownBy;
	if (shrink > iFreeBuffers)
		shrink = iFreeBuffers;

	// work backwards as the grown buffers should be at the back
	TUint i;
	for (i = 0; i < shrink; i++)
		{
		LockPool();

		if (iFreeList.IsEmpty())
			{
			UnlockPool();
			break;
			}

		DShBuf* buf = _LOFF(iFreeList.Last(), DShBuf, iObjLink);

		// can't delete initial buffers
		if (buf >= iInitialBuffersArray && buf < (iInitialBuffersArray + iInitialBuffers))
			{
			UnlockPool();
			break;
			}

		buf->iObjLink.Deque();
		--iFreeBuffers;
		--iTotalBuffers;
		UnlockPool();
		buf->DObject::Close(NULL);
		}

	TInt r = UpdateMappingsAndReservedHandles(-static_cast<TInt>(i));

	// If we couldn't shrink the pool by this many buffers, wait until we Free() another
	// buffer before trying to shrink again.
	if (i < shrink)
		iPoolFlags |= EShPoolSuppressShrink;

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::ShrinkPool()"));
	return r;
	}

// Kernel side API
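//
// A rough sketch of how a kernel-side client might drive this path (the
// surrounding driver code is hypothetical; Alloc and Close are the real entry
// points, and a freed buffer is recycled through Free() above rather than
// deleted):
//
//   DShBuf* buf;
//   TInt r = pool->Alloc(buf);       // takes a buffer off the free list
//   if (r == KErrNone)
//       {
//       TUint8* p = buf->Base();     // kernel linear address
//       // ... fill the buffer ...
//       buf->Close(NULL);            // drop the reference when done
//       }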
       
TInt DMemModelAlignedShPool::Alloc(DShBuf*& aShBuf)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::Alloc (DShBuf)"));

	TInt r = KErrNoMemory;
	aShBuf = NULL;

	LockPool();

	if (!iFreeList.IsEmpty())
		{
		aShBuf = _LOFF(iFreeList.GetFirst(), DShBuf, iObjLink);

#ifdef _DEBUG
		iAllocated.Add(&aShBuf->iObjLink);
		iAllocatedBuffers++;
#endif
		--iFreeBuffers;
		Open(); // increment pool reference count
		r = KErrNone;
		}

	UnlockPool();

	if (HaveWorkToDo())
		KickManagementDfc();

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::Alloc return buf = 0x%08x", aShBuf));
	return r;
	}
       
DMemModelNonAlignedShBuf::DMemModelNonAlignedShBuf(DShPool* aPool, TLinAddr aRelAddr) : DShBuf(aPool, aRelAddr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::DMemModelNonAlignedShBuf()"));
	}

DMemModelNonAlignedShBuf::~DMemModelNonAlignedShBuf()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::~DMemModelNonAlignedShBuf()"));
	}

TInt DMemModelNonAlignedShBuf::Close(TAny* aPtr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Close(0x%08x)", aPtr));

	if (aPtr)
		{
		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);

		// there are no per-buffer resources for kernel clients of non-aligned buffers
		if (pP != K::TheKernelProcess)
			iPool->CloseClient(pP);
		}

	return DShBuf::Close(aPtr);
	}

TInt DMemModelNonAlignedShBuf::AddToProcess(DProcess* aProcess, TUint /* aAttr */)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("Adding DMemModelShBuf %O to process %O", this, aProcess));
	TUint flags;

	return iPool->OpenClient(aProcess, flags);
	}
       
TUint8* DMemModelNonAlignedShBuf::Base(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Base(0x%x)", aProcess));

	TUint8* base = reinterpret_cast<DMemModelNonAlignedShPool*>(iPool)->Base(aProcess) + (TUint)iRelAddress;

	return base;
	}

TUint8* DMemModelNonAlignedShBuf::Base()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Base()"));

	TUint8* base = reinterpret_cast<DMemModelNonAlignedShPool*>(iPool)->Base();

	return base ? base + iRelAddress : NULL;
	}

TInt DMemModelNonAlignedShBuf::Map(TUint /* aMapAttr */, DProcess* /* aProcess */, TLinAddr& /* aBase */)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Map()"));

	return KErrNotSupported;
	}

TInt DMemModelNonAlignedShBuf::UnMap(DProcess* /* aProcess */)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::UnMap()"));

	return KErrNotSupported;
	}

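// Non-aligned buffers share one memory object for the whole pool, so pinning
// works on the page range covering [iRelAddress, iRelAddress + Size()) within
// it and then offsets the returned physical address back to the start of the
// buffer.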
       
TInt DMemModelNonAlignedShBuf::Pin(TPhysicalPinObject* aPinObject, TBool aReadOnly, TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DMemModelNonAlignedShBuf::Pin");

	DMemModelNonAlignedShPool* pool = reinterpret_cast<DMemModelNonAlignedShPool*>(iPool);

	NKern::ThreadEnterCS();

	TInt startPage = iRelAddress >> KPageShift;
	TInt lastPage = MM::RoundToPageCount(iRelAddress + Size());

	TInt pages = lastPage - startPage;

	if (!pages)
		pages++;

	TInt r = MM::PinPhysicalMemory(pool->iMemoryObject, (DPhysicalPinMapping*)aPinObject,
									startPage, pages, aReadOnly, aAddress, aPages, aMapAttr, aColour);

	// adjust physical address to start of the buffer
	if (r == KErrNone)
		{
		aAddress += (iRelAddress - (startPage << KPageShift));
		}
	NKern::ThreadLeaveCS();
	return r;
	}
       
DMemModelNonAlignedShPool::DMemModelNonAlignedShPool() : DMemModelShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::DMemModelNonAlignedShPool"));
	}

DMemModelNonAlignedShPool::~DMemModelNonAlignedShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::~DMemModelNonAlignedShPool"));

	MM::MemoryDestroy(iMemoryObject);

	delete iPagesMap;
	delete iBufMap;
	}

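// Creates the pool's single memory object and the two bitmap allocators that
// track it: iBufMap records which buffer slots are in use and iPagesMap which
// pages are committed. Physical pools take their pages from the caller;
// otherwise enough pages for the initial buffers are committed here.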
       
TInt DMemModelNonAlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("DMemModelNonAlignedShPool::DoCreate(%d, %d, %d)", aInfo.iInfo.iMaxBufs, iBufGap, iBufSize));

	TInt r;
	TUint64 maxSize64 = static_cast<TUint64>(aInfo.iInfo.iMaxBufs) * static_cast<TUint64>(iBufGap);

	if (maxSize64 > static_cast<TUint64>(KMaxTInt) || maxSize64 == 0)
		return KErrArgument;

	TInt maxPages = MM::RoundToPageCount(static_cast<TInt>(maxSize64));

	iBufMap = TBitMapAllocator::New(aInfo.iInfo.iMaxBufs, (TBool)ETrue);
	if (iBufMap == NULL)
		return KErrNoMemory;

	iPagesMap = TBitMapAllocator::New(maxPages, (TBool)ETrue);
	if (iPagesMap == NULL)
		return KErrNoMemory;

	// Memory attributes
	TMemoryAttributes attr = EMemoryAttributeStandard;

	// Memory type
	TMemoryObjectType memoryType = (iPoolFlags & EShPoolPhysicalMemoryPool) ? EMemoryObjectHardware : EMemoryObjectUnpaged;

	// Memory flags
	TMemoryCreateFlags memoryFlags = EMemoryCreateDefault;	// Don't leave previous contents of memory

	// Now create the memory object
	r = MM::MemoryNew(iMemoryObject, memoryType, maxPages, memoryFlags, attr);
	if (r != KErrNone)
		return r;

	// Make sure we give the caller the number of buffers they were expecting
	iCommittedPages = MM::RoundToPageCount(iInitialBuffers * iBufGap);

	if (iPoolFlags & EShPoolPhysicalMemoryPool)
		{
		__KTRACE_OPT(KMMU, Kern::Printf("DMemModelNonAlignedShPool::DoCreate(iCommittedPages = 0x%08x, aInfo.iPhysAddr.iPhysAddrList = 0x%08x )", iCommittedPages, aInfo.iPhysAddr.iPhysAddrList));
		if (iPoolFlags & EShPoolContiguous)
			{
			r = MM::MemoryAddContiguous(iMemoryObject, 0, iCommittedPages, aInfo.iPhysAddr.iPhysAddr);
			}
		else
			{
			r = MM::MemoryAddPages(iMemoryObject, 0, iCommittedPages, aInfo.iPhysAddr.iPhysAddrList);
			}

		iMaxPages = iCommittedPages;
		}
	else
		{
		__KTRACE_OPT(KMMU, Kern::Printf("DMemModelNonAlignedShPool::DoCreate(iCommittedPages = %d, contig = %d)", iCommittedPages, iPoolFlags & EShPoolContiguous));

		if (iPoolFlags & EShPoolContiguous)
			{
			TPhysAddr paddr;
			r = MM::MemoryAllocContiguous(iMemoryObject, 0, iCommittedPages, 0, paddr);
			}
		else
			{
			r = MM::MemoryAlloc(iMemoryObject, 0, iCommittedPages);
			}

		iMaxPages = maxPages;
		}

	iPagesMap->Alloc(0, iCommittedPages);

	return r;
	}
       
TUint8* DMemModelNonAlignedShPool::Base(DProcess* aProcess)
	{
	TUint8* base = NULL;

	LockPool();
	DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client); // ASSERT because pool must be already opened in the client's address space
	__NK_ASSERT_DEBUG(client->iMapping); // ASSERT because non-aligned buffers are mapped by default in user space

	base = reinterpret_cast<TUint8*>(MM::MappingBase(client->iMapping));

	UnlockPool();

	return base;
	}
       
TInt DMemModelNonAlignedShPool::CreateInitialBuffers()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::CreateInitialBuffers"));

	iInitialBuffersArray = reinterpret_cast<DMemModelNonAlignedShBuf*>(Kern::Alloc(iInitialBuffers * sizeof(DMemModelNonAlignedShBuf)));

	if (iInitialBuffersArray == NULL)
		return KErrNoMemory;

	TLinAddr offset = 0;
	for (TUint i = 0; i < iInitialBuffers; i++)
		{
		DMemModelNonAlignedShBuf *buf = new (&iInitialBuffersArray[i]) DMemModelNonAlignedShBuf(this, offset);
		TInt r = buf->Construct();

		if (r == KErrNone)
			{
			iFreeList.Add(&buf->iObjLink);
			}
		else
			{
			iInitialBuffers = i;
			return KErrNoMemory;
			}

		offset += iBufGap;
		}

	iFreeBuffers  = iInitialBuffers;
	iTotalBuffers = iInitialBuffers;
	iBufMap->Alloc(0, iInitialBuffers);

	return KErrNone;
	}

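// First attach of a process to a non-aligned pool. Unlike aligned pools,
// user-side clients get a single mapping of the whole pool here (buffers are
// just offsets into it); the kernel process only maps the pool if
// EShPoolAutoMapBuf is set, caching the base address in iBaseAddress.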
       
  1342 TInt DMemModelNonAlignedShPool::AddToProcess(DProcess* aProcess, TUint aAttr)
       
  1343 	{
       
  1344 	// Must be in critical section so we don't leak os asid references.
       
  1345 	__ASSERT_CRITICAL;
       
  1346 	__KTRACE_OPT(KMMU, Kern::Printf("Adding DMemModelShPool %O to process %O", this, aProcess));
       
  1347 
       
  1348 	DMemoryMapping* mapping = NULL;
       
  1349 
       
  1350 	TBool write = (TBool)EFalse;
       
  1351 
       
  1352 	// User = ETrue, ReadOnlyWrite = ETrue, Execute = EFalse
       
  1353 	if (aAttr & EShPoolWriteable)
       
  1354 		write = (TBool)ETrue;
       
  1355 
       
  1356 	TMappingPermissions perm = MM::MappingPermissions(ETrue,	// user
       
  1357 													  write,	// writeable
       
  1358 													  EFalse);	// execute
       
  1359 
       
  1360 	TMappingCreateFlags mappingFlags = EMappingCreateDefault;
       
  1361 
       
  1362 	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
       
  1363 
       
  1364 	Kern::MutexWait(*iProcessLock);
       
  1365 	TInt r = KErrNone;
       
  1366 
       
  1367 	LockPool();
       
  1368 	DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
       
  1369 	UnlockPool();
       
  1370 
       
  1371 	if (!client)
       
  1372 		{
       
  1373 		client = new DMemModelNonAlignedShPoolClient;
       
  1374 
       
  1375 		if (client)
       
  1376 			{
       
  1377 			// map non aligned pools in userside processes by default
       
  1378 			if (aAttr & EShPoolAutoMapBuf || pP != K::TheKernelProcess)
       
  1379 				{
       
  1380 				// Open a reference on the os asid so it doesn't get freed and reused.
       
  1381 				TInt osAsid = pP->TryOpenOsAsid();
       
  1382 				if (osAsid < 0)
       
  1383 					{// The process freed its os asid so can't create a new mapping.
       
  1384 					r = KErrDied;
       
  1385 					}
       
  1386 				else
       
  1387 					{
       
  1388 					r = MM::MappingNew(mapping, iMemoryObject, perm, osAsid, mappingFlags);
       
  1389 					// Close the reference as the mapping will be destroyed if the process dies.
       
  1390 					pP->CloseOsAsid();
       
  1391 					}
       
  1392 
       
  1393 				if ((r == KErrNone) && (pP == K::TheKernelProcess))
       
  1394 					{
       
  1395 					iBaseAddress = MM::MappingBase(mapping);
       
  1396 					}
       
  1397 				}
       
  1398 
       
  1399 			if (r == KErrNone)
       
  1400 				{
       
  1401 				client->iMapping = mapping;
       
  1402 				client->iFlags = aAttr;
       
  1403 				r = iClientMap->Add(reinterpret_cast<TUint>(aProcess), client);
       
  1404 
       
  1405 				if (r == KErrNone)
       
  1406 					{
       
  1407 					if (pP != K::TheKernelProcess)
       
  1408 						{
       
  1409 						r = aProcess->iHandles.Reserve(iTotalBuffers);
       
  1410 
       
  1411 						if (r != KErrNone)
       
  1412 							{
       
  1413 							iClientMap->Remove(reinterpret_cast<TUint>(aProcess));
       
  1414 							}
       
  1415 						}
       
  1416 					}
       
  1417 
       
  1418 				if (r != KErrNone)
       
  1419 					{
       
  1420 					delete client;
       
  1421 					MM::MappingDestroy(mapping);
       
  1422 					}
       
  1423 				}
       
  1424 			else
       
  1425 				{
       
  1426 				delete client;
       
  1427 				}
       
  1428 			}
       
  1429 		else
       
  1430 			{
       
  1431 			r = KErrNoMemory;
       
  1432 			}
       
  1433 		}
       
  1434 	else
       
  1435 		{
       
  1436 		LockPool();
       
  1437 		client->iAccessCount++;
       
  1438 		UnlockPool();
       
  1439 		}
       
  1440 
       
  1441 	Kern::MutexSignal(*iProcessLock);
       
  1442 
       
  1443 	return r;
       
  1444 	}
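// Note on client bookkeeping (descriptive): the first AddToProcess() for a
// given process creates one DMemModelNonAlignedShPoolClient carrying a single
// mapping that covers the whole pool, and reserves a handle slot per buffer;
// subsequent calls from the same process only increment iAccessCount under
// the pool lock, so each process pays the mapping cost once.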
       

TInt DMemModelNonAlignedShPool::DeleteInitialBuffers()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::DeleteInitialBuffers"));

	if (iInitialBuffersArray != NULL)
		{
		for (TUint i = 0; i < iInitialBuffers; i++)
			{
			iInitialBuffersArray[i].iObjLink.Deque(); // remove from free list
			iInitialBuffersArray[i].Dec();
			iInitialBuffersArray[i].~DMemModelNonAlignedShBuf();
			}
		}

	Kern::Free(iInitialBuffersArray);
	iInitialBuffersArray = NULL;

	return KErrNone;
	}
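// Note: iInitialBuffersArray is a single heap block whose elements were
// placement-constructed when the pool was created, hence the explicit
// destructor calls above before the whole block is released with Kern::Free().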
       

TInt DMemModelNonAlignedShPool::DestroyAllMappingsAndReservedHandles(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));

	TInt r = KErrNone;
	Kern::MutexWait(*iProcessLock);
	DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Remove(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client);
	__NK_ASSERT_DEBUG(client->iAccessCount == 0);

	if (client->iMapping)
		{
		MM::MappingDestroy(client->iMapping);
		}
	delete client;

	if (aProcess != K::TheKernelProcess)
		{
		// Remove reserved handles
		r = aProcess->iHandles.Reserve(-(iTotalBuffers));
		}
	else
		{
		iBaseAddress = 0;
		}

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));

	return r;
	}
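// Handle accounting: the Reserve(-iTotalBuffers) call above releases the
// handle slots reserved for this process while it was attached; the kernel
// process holds no such reservation, so for it only the cached iBaseAddress
// is cleared.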
       


TInt DMemModelNonAlignedShPool::Close(TAny* aPtr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::Close(0x%08x)", aPtr));

	if (aPtr)
		{
		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);

		CloseClient(pP);
		}

	return DShPool::Close(aPtr);
	}
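// Note: when Close() is given a process pointer, per-process state is
// released via CloseClient() before the generic DShPool::Close() drops the
// pool's own reference count.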
       

void DMemModelNonAlignedShPool::FreeBufferPages(TUint aOffset)
	{
	TLinAddr firstByte = aOffset;	// offset of first byte in buffer
	TLinAddr lastByte = firstByte+iBufGap-1;	// offset of last byte in buffer
	TUint firstPage = firstByte>>KPageShift;	// index of first page containing part of the buffer
	TUint lastPage = lastByte>>KPageShift;		// index of last page containing part of the buffer

	TUint firstBuffer = (firstByte&~KPageMask)/iBufGap; // index of first buffer which lies in firstPage
	TUint lastBuffer = (lastByte|KPageMask)/iBufGap;    // index of last buffer which lies in lastPage
	TUint thisBuffer = firstByte/iBufGap;				// index of the buffer to be freed

	// Ensure lastBuffer is within bounds (there may be room in the last
	// page for more buffers than we have allocated).
	if (lastBuffer >= iMaxBuffers)
		lastBuffer = iMaxBuffers-1;

	if(firstBuffer!=thisBuffer && iBufMap->NotFree(firstBuffer,thisBuffer-firstBuffer))
		{
		// first page has other allocated buffers in it,
		// so we can't free it and must move on to the next one...
		if (firstPage >= lastPage)
			return;
		++firstPage;
		}

	if(lastBuffer!=thisBuffer && iBufMap->NotFree(thisBuffer+1,lastBuffer-thisBuffer))
		{
		// last page has other allocated buffers in it,
		// so we can't free it and must step back to the previous one...
		if (lastPage <= firstPage)
			return;
		--lastPage;
		}

	if(firstPage<=lastPage)
		{
		// we can free pages firstPage through to lastPage...
		TUint numPages = lastPage-firstPage+1;
		iPagesMap->SelectiveFree(firstPage,numPages);
		MM::MemoryLock(iMemoryObject);
		MM::MemoryFree(iMemoryObject, firstPage, numPages);
		MM::MemoryUnlock(iMemoryObject);
		iCommittedPages -= numPages;
		}
	}
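// Worked example (illustrative numbers): with iBufGap == 1536 and 4K pages,
// freeing buffer 4 gives firstByte 6144 and lastByte 7679, so firstPage and
// lastPage are both 1; firstBuffer is 2 (4096/1536) and lastBuffer is 5
// (8191/1536). Page 1 can therefore only be decommitted if buffers 2, 3 and
// 5 are free as well, which is exactly what the two NotFree() checks test.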
       

TInt DMemModelNonAlignedShPool::GrowPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::GrowPool()"));

	// Don't do anything with physical memory pools
	if (iPoolFlags & EShPoolPhysicalMemoryPool)
		return KErrNone;

	Kern::MutexWait(*iProcessLock);

	TUint32 headroom = iMaxBuffers - iTotalBuffers;

	// How many buffers to grow by?
	TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
	if (grow == 0)			// Handle round-to-zero
		grow = 1;
	if (grow > headroom)
		grow = headroom;

	TInt r = KErrNone;
	SDblQue temp;

	TUint i;
	for (i = 0; i < grow; ++i)
		{
		TInt offset = iBufMap->Alloc();

		if (offset < 0)
			{
			r = KErrNoMemory;
			break;
			}

		offset *= iBufGap;

		TInt lastPage = (offset + iBufSize - 1) >> KPageShift;

		// Allocate one page at a time.
		for (TInt page = offset >> KPageShift; page <= lastPage; ++page)
			{
			// Is the page allocated?
			if (iPagesMap->NotAllocated(page, 1))
				{
				MM::MemoryLock(iMemoryObject);
				r = MM::MemoryAlloc(iMemoryObject, page, 1);
				MM::MemoryUnlock(iMemoryObject);

				if (r != KErrNone)
					{
					break;
					}

				++iCommittedPages;
				iPagesMap->Alloc(page, 1);
				}
			}

		if (r != KErrNone)
			{
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			break;
			}

		DMemModelNonAlignedShBuf *buf = new DMemModelNonAlignedShBuf(this, offset);

		if (buf == NULL)
			{
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			r = KErrNoMemory;
			break;
			}

		r = buf->Construct();

		if (r != KErrNone)
			{
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			buf->DObject::Close(NULL);
			break;
			}

		temp.Add(&buf->iObjLink);
		}

	r = UpdateReservedHandles(i);

	if (r == KErrNone)
		{
		LockPool();
		iFreeList.MoveFrom(&temp);
		iFreeBuffers += i;
		iTotalBuffers += i;
		UnlockPool();
		}
	else
		{
		// couldn't reserve handles, so we have no choice but to
		// delete the buffers
		__KTRACE_OPT(KMMU, Kern::Printf("GrowPool failed with %d, deleting buffers", r));
		SDblQueLink *pLink;
		while ((pLink = temp.GetFirst()) != NULL)
			{
			DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
			TLinAddr offset = buf->iRelAddress;
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			buf->DObject::Close(NULL);
			}
		__KTRACE_OPT(KMMU, Kern::Printf("Buffers deleted"));
		}

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::GrowPool()"));
	return r;
	}
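// Growth arithmetic (illustrative, assuming the 24.8 fixed-point encoding
// implied by the mult_fx248() name): with iTotalBuffers == 100 and
// iGrowByRatio == 64 (i.e. 0.25), the pool tries to grow by 25 buffers,
// clipped to the headroom left before iMaxBuffers; a ratio that rounds to
// zero still grows the pool by a single buffer.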
       

TInt DMemModelNonAlignedShPool::ShrinkPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::ShrinkPool()"));

	// Don't do anything with physical memory pools
	if (iPoolFlags & EShPoolPhysicalMemoryPool)
		return KErrNone;

	Kern::MutexWait(*iProcessLock);

	TUint32 grownBy = iTotalBuffers - iInitialBuffers;

	// How many buffers to shrink by?
	TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
	if (shrink == 0)		// Handle round-to-zero
		shrink = 1;
	if (shrink > grownBy)
		shrink = grownBy;
	if (shrink > iFreeBuffers)
		shrink = iFreeBuffers;

	TUint i;
	for (i = 0; i < shrink; ++i)
		{
		LockPool();

		if (iFreeList.IsEmpty())
			{
			UnlockPool();
			break;
			}

		// work from the back of the queue
		SDblQueLink *pLink = iFreeList.Last();

		DShBuf* pBuf = _LOFF(pLink, DShBuf, iObjLink);

		if (pBuf >= iInitialBuffersArray && pBuf < (iInitialBuffersArray + iInitialBuffers))
			{
			UnlockPool();
			break;
			}

		--iFreeBuffers;
		--iTotalBuffers;
		pLink->Deque();
		UnlockPool();

		TLinAddr offset = pBuf->iRelAddress;
		iBufMap->Free(offset / iBufGap);
		FreeBufferPages(offset);

		pBuf->DObject::Close(NULL);
		}

	UpdateReservedHandles(-(TInt)i);

	// If we couldn't shrink the pool by this many buffers, wait until we Free() another
	// buffer before trying to shrink again.
	if (i < shrink)
		iPoolFlags |= EShPoolSuppressShrink;

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::ShrinkPool()"));

	return KErrNone;
	}
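// Design note: shrinking walks the free list from the tail because Free()
// queues grown buffers behind the initial ones, and it stops as soon as it
// meets a member of iInitialBuffersArray, since the initial buffers are only
// released when the pool itself is destroyed.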
       

TInt DMemModelNonAlignedShPool::UpdateFreeList()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::UpdateFreeList"));

	SDblQue temp;

	LockPool();
	while(!iAltFreeList.IsEmpty())
		{
		// sort a temporary list of up to eight objects, lowest index first
		for (TInt n = 0; n < 8 && !iAltFreeList.IsEmpty(); ++n)
			{
			// assume that the lower indexes are allocated and freed first,
			// and will therefore be nearer the front of the list
			DShBuf* buf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);

			SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&temp);
			SDblQueLink* pLink = temp.Last();

			while (ETrue)
				{
				// traverse the list starting at the back
				if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
					{
					pLink = pLink->iPrev;
					}
				else
					{
					buf->iObjLink.InsertAfter(pLink);
					break;
					}
				}
			}

		// now merge with the free list
		while(!temp.IsEmpty())
			{
			if (iFreeList.IsEmpty())
				{
				iFreeList.MoveFrom(&temp);
				break;
				}

			// work backwards from the highest index
			DShBuf* buf = _LOFF(temp.Last(), DShBuf, iObjLink);
			SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&iFreeList);
			SDblQueLink* pLink = iFreeList.Last();

			while (!NKern::FMFlash(&iLock))
				{
				if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
					{
					pLink = pLink->iPrev;
					}
				else
					{
					buf->iObjLink.Deque();
					buf->iObjLink.InsertAfter(pLink);
					// next buffer
					if (temp.IsEmpty())
						break;
					buf = _LOFF(temp.Last(), DShBuf, iObjLink);
					}
				}
			}
		NKern::FMFlash(&iLock);
		}
	UnlockPool();

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::UpdateFreeList"));
	return KErrNone;
	}
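// Note: NKern::FMFlash() briefly releases the pool's fast mutex if another
// thread is waiting for it, which bounds how long this merge can hold the
// lock; when the mutex was flashed the cached Last() pointers may be stale,
// so the outer loop re-fetches them before continuing.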
       

void DMemModelNonAlignedShPool::Free(DShBuf* aBuf)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::Free (aBuf = 0x%08x, aBuf->Base() 0x%08x)", aBuf, aBuf->iRelAddress));

	LockPool();
#ifdef _DEBUG
	// Remove from allocated list
	aBuf->iObjLink.Deque();
#endif

	// we want to put the initial buffers at the head of the free list
	// and the grown buffers at the tail as this makes shrinking more efficient
	if (aBuf >= iInitialBuffersArray && aBuf < (iInitialBuffersArray + iInitialBuffers))
		{
		iFreeList.AddHead(&aBuf->iObjLink);
		}
	else
		{
		iAltFreeList.Add(&aBuf->iObjLink);
		}

	++iFreeBuffers;
#ifdef _DEBUG
	--iAllocatedBuffers;
#endif
	iPoolFlags &= ~EShPoolSuppressShrink;		// Allow shrinking again, if it was blocked
	UnlockPool();

	// queue ManagementDfc which completes notifications as appropriate
	if (HaveWorkToDo())
		KickManagementDfc();

	DShPool::Close(NULL); // decrement pool reference count
	}
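// Design note: grown buffers are parked on iAltFreeList unsorted so the
// Free() fast path stays cheap; the management DFC merges them into
// iFreeList in address order later (UpdateFreeList), which keeps grown
// buffers towards the tail where ShrinkPool() looks for them.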
       

// Kernel side API
TInt DMemModelNonAlignedShPool::Alloc(DShBuf*& aShBuf)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::Alloc (DShBuf)"));

	aShBuf = NULL;

	LockPool();

	if (!iFreeList.IsEmpty())
		{
		aShBuf = _LOFF(iFreeList.GetFirst(), DShBuf, iObjLink);
#ifdef _DEBUG
		iAllocated.Add(&aShBuf->iObjLink);
		iAllocatedBuffers++;
#endif
		}
	else
		{
		// try alternative free list
		if (!iAltFreeList.IsEmpty())
			{
			aShBuf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);
#ifdef _DEBUG
			iAllocated.Add(&aShBuf->iObjLink);
			iAllocatedBuffers++;
#endif
			}
		else
			{
			UnlockPool();
			KickManagementDfc(); // Try to grow
			return KErrNoMemory;
			}
		}

	--iFreeBuffers;
	Open(); // increment pool reference count

	UnlockPool();

	if (HaveWorkToDo())
		KickManagementDfc();

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::Alloc return buf = 0x%08x", aShBuf));
	return KErrNone;
	}
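// Usage sketch (illustrative only, error handling elided): a kernel-side
// client allocates from the pool and returns the buffer by closing its
// reference, which balances the Open() taken in Alloc() above:
//
//     DShBuf* buf;
//     if (pool->Alloc(buf) == KErrNone)
//         {
//         // ... fill or drain the buffer ...
//         buf->Close(NULL);	// last close hands the buffer back to the pool
//         }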