kernel/eka/memmodel/emul/win32/mshbuf.cpp
// Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32/memmodel/emul/win32/mshbuf.cpp
// Shareable Data Buffers

#include "memmodel.h"
#include <kernel/smap.h>

_LIT(KLitDWin32ShPool,"DWin32ShPool");
_LIT(KLitDWin32AlignedShPool,"DWin32AlignedShPool");
_LIT(KLitDWin32NonAlignedShPool,"DWin32NonAlignedShPool");

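// Overview (editorial summary of the classes implemented below):
//  - DWin32ShBuf:            a single shared buffer, addressed as an offset into its pool.
//  - DWin32ShPool:           common pool behaviour for the Win32 emulator memory model;
//                            the pool's address range is reserved with VirtualAlloc() and
//                            pages are committed/decommitted via MM::Commit()/MM::Decommit()
//                            as the pool grows and shrinks.
//  - DWin32AlignedShPool:    pool whose buffers are page aligned and may be separated by
//                            guard pages.
//  - DWin32NonAlignedShPool: pool whose buffers may share pages, tracked with an extra
//                            per-page bitmap (iPagesMap).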
       
DWin32ShBuf::DWin32ShBuf(DShPool* aPool, TLinAddr aRelAddr) : DShBuf(aPool, aRelAddr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::DWin32ShBuf()"));
	}

DWin32ShBuf::~DWin32ShBuf()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::~DWin32ShBuf()"));
	}

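// Return the buffer's address inside the pool's memory. On the emulator there is a
// single mapping used for all processes, so the aProcess parameter does not affect
// the result.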
       
TUint8* DWin32ShBuf::Base(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::Base(0x%x)", aProcess));

	TUint8* base = reinterpret_cast<DWin32ShPool*>(iPool)->Base(aProcess) + (TUint)iRelAddress;

	return base;
	}

TUint8* DWin32ShBuf::Base()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::Base()"));

	TUint8* base = reinterpret_cast<DWin32ShPool*>(iPool)->Base() + (TUint)iRelAddress;

	return base;
	}

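// Map()/UnMap(): no real mapping is performed on the emulator. For page-aligned pools
// the calls just track the iMapped flag and return the buffer's address; for other
// pools they return KErrNotSupported.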
       
TInt DWin32ShBuf::Map(TUint /* aMapAttr */, DProcess* /* aProcess */, TLinAddr& aBase)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::Map()"));

	TInt r = KErrNotSupported;

	if (iPool->iPoolFlags & EShPoolPageAlignedBuffer)
		{
		if(iMapped)
			{
			r = KErrAlreadyExists;
			}
		else
			{
			aBase = reinterpret_cast<TUint>(reinterpret_cast<DWin32ShPool*>(iPool)->Base() + (TUint)iRelAddress);
			iMapped = ETrue;
			r = KErrNone;
			}
		}

	return r;
	}

TInt DWin32ShBuf::UnMap(DProcess* /* aProcess */)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::UnMap()"));

	TInt r = KErrNotSupported;

	if (iPool->iPoolFlags & EShPoolPageAlignedBuffer)
		{
		if(iMapped)
			{
			iMapped = EFalse;
			r = KErrNone;
			}
		else
			{
			r = KErrNotFound;
			}
		}

	return r;
	}

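// Per-buffer process accounting: AddToProcess() registers the process as a client of
// the owning pool (via OpenClient()), and Close() balances that with CloseClient()
// before handing over to the generic DShBuf::Close(). The kernel process is exempt.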
       
TInt DWin32ShBuf::AddToProcess(DProcess* aProcess, TUint /* aAttr */)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("Adding DWin32ShBuf %O to process %O", this, aProcess));
	TUint flags;
	TInt r = KErrNone;

	if (aProcess != K::TheKernelProcess)
		r = iPool->OpenClient(aProcess, flags);

	return r;
	}
       
TInt DWin32ShBuf::Close(TAny* aPtr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::Close(0x%08x)", aPtr));

	if (aPtr)
		{
		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);

		if (pP != K::TheKernelProcess)
			iPool->CloseClient(pP);
		}

	return DShBuf::Close(aPtr);
	}
       
DWin32ShPool::DWin32ShPool()
  : DShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::DWin32ShPool"));
	}


DWin32ShPool::~DWin32ShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::~DWin32ShPool"));

	if (iWin32MemoryBase)
		{
		TUint64 maxSize = static_cast<TUint64>(iMaxBuffers) * static_cast<TUint64>(iBufGap);

		// We know that maxSize is less than KMaxTInt as we tested for this in DoCreate().
		VirtualFree(LPVOID(iWin32MemoryBase), (SIZE_T)maxSize, MEM_DECOMMIT);
		VirtualFree(LPVOID(iWin32MemoryBase), 0, MEM_RELEASE);
		MM::Wait();
		MM::FreeMemory += iWin32MemorySize;
		MM::Signal();
		}

	delete iBufMap;
	}

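// Clean-up helpers. DestroyClientResources() is called when a client process goes away;
// DestroyHandles() removes the process from the client map and releases the handle
// reservation made in AddToProcess(). DeleteInitialBuffers() tears down the buffers
// that were placement-constructed in iInitialBuffersArray by CreateInitialBuffers().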
       
void DWin32ShPool::DestroyClientResources(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::DestroyClientResources"));

	TInt r = DestroyHandles(aProcess);
	__NK_ASSERT_DEBUG((r == KErrNone) || (r == KErrDied));
	(void)r;		// Silence warnings
	}

TInt DWin32ShPool::DeleteInitialBuffers()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::DeleteInitialBuffers"));

	if (iInitialBuffersArray != NULL)
		{
		for (TUint i = 0; i < iInitialBuffers; i++)
			{
			iInitialBuffersArray[i].iObjLink.Deque(); // remove from free list
			iInitialBuffersArray[i].Dec();
			iInitialBuffersArray[i].~DWin32ShBuf();
			}

		Kern::Free(iInitialBuffersArray);
		iInitialBuffersArray = NULL;
		}

	return KErrNone;
	}

TInt DWin32ShPool::DestroyHandles(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::DestroyHandles(0x%08x)", aProcess));

	TInt r = KErrNone;
	Kern::MutexWait(*iProcessLock);
	DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Remove(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client);
	__NK_ASSERT_DEBUG(client->iAccessCount == 0);

	delete client;

	if (aProcess != K::TheKernelProcess)
		{
		// Remove reserved handles
		r = aProcess->iHandles.Reserve(-TInt(iTotalBuffers));
		}

	Kern::MutexSignal(*iProcessLock);

	return r;
	}
       
TInt DWin32ShPool::Close(TAny* aPtr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::Close(0x%08x)", aPtr));

	if (aPtr) // not NULL, so this must be a user-side process
		{
		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);

		CloseClient(pP);
		}

	return DShPool::Close(aPtr);
	}

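// Construct the initial buffers in a single Kern::Alloc'd array (placement new), add
// them to the free list and mark the corresponding entries of iBufMap as allocated.
// On partial failure iInitialBuffers is trimmed to the number actually constructed.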
       
TInt DWin32ShPool::CreateInitialBuffers()
	{
	__KTRACE_OPT(KMMU,Kern::Printf(">DWin32ShPool::CreateInitialBuffers"));

	iInitialBuffersArray = reinterpret_cast<DWin32ShBuf*>(Kern::Alloc(iInitialBuffers * sizeof(DWin32ShBuf)));

	if (iInitialBuffersArray == NULL)
		return KErrNoMemory;

	TLinAddr offset = 0;
	for (TUint i = 0; i < iInitialBuffers; i++)
		{
		DWin32ShBuf *buf = new (&iInitialBuffersArray[i]) DWin32ShBuf(this, offset);
		TInt r = buf->Construct();

		if (r == KErrNone)
			{
			iFreeList.Add(&buf->iObjLink);
			}
		else
			{
			iInitialBuffers = i;
			return KErrNoMemory;
			}

		offset += iBufGap;
		}

	iFreeBuffers = iInitialBuffers;
	iTotalBuffers = iInitialBuffers;

	iBufMap->Alloc(0, iInitialBuffers);

	return KErrNone;
	}


TUint8* DWin32ShPool::Base()
	{
	return iWin32MemoryBase;
	}


TUint8* DWin32ShPool::Base(DProcess* /*aProcess*/)
	{
	return iWin32MemoryBase;
	}

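// Register aProcess as a client of this pool. The first call for a process creates a
// DShPoolClient entry in iClientMap and reserves one handle slot per buffer in the
// process's handle table; subsequent calls just increment the client's access count.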
       
TInt DWin32ShPool::AddToProcess(DProcess* aProcess, TUint aAttr)
	{
	__KTRACE_OPT(KEXEC, Kern::Printf("Adding DWin32ShPool %O to process %O", this, aProcess));

	TInt r = KErrNone;

	Kern::MutexWait(*iProcessLock);
	LockPool();
	DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
	UnlockPool();

	if (!client)
		{
		client = new DShPoolClient;

		if (client)
			{
			client->iFlags = aAttr;
			r = iClientMap->Add(reinterpret_cast<TUint>(aProcess), client);

			if (r == KErrNone)
				{
				if (aProcess != K::TheKernelProcess)
					{
					r = aProcess->iHandles.Reserve(iTotalBuffers);

					if (r != KErrNone)
						{
						iClientMap->Remove(reinterpret_cast<TUint>(aProcess));
						}
					}
				}

			if (r != KErrNone)
				{
				delete client;
				}
			}
		else
			{
			r = KErrNoMemory;
			}
		}
	else
		{
		LockPool();
		client->iAccessCount++;
		UnlockPool();
		}

	Kern::MutexSignal(*iProcessLock);

	return r;
	}

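// Common pool creation: reserve (but do not commit) the maximum address range for the
// pool with VirtualAlloc(MEM_RESERVE) and create the buffer allocation bitmap.
// Committing pages is left to the derived classes' DoCreate()/GrowPool().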
       
TInt DWin32ShPool::DoCreate(TShPoolCreateInfo& aInfo)
	{
	TUint64 maxSize = static_cast<TUint64>(aInfo.iInfo.iMaxBufs) * static_cast<TUint64>(iBufGap);

	if (maxSize > static_cast<TUint64>(KMaxTInt))
		{
		return KErrArgument;
		}

	__KTRACE_OPT(KMMU,Kern::Printf("DWin32ShPool::DoCreate (maxSize = 0x%08x, iBufGap = 0x%08x)",
		static_cast<TInt>(maxSize), iBufGap));

	iWin32MemoryBase = (TUint8*) VirtualAlloc(NULL, (SIZE_T)maxSize, MEM_RESERVE, PAGE_READWRITE);
	if (iWin32MemoryBase == NULL)
		{
		return KErrNoMemory;
		}

	__KTRACE_OPT(KMMU,Kern::Printf("DWin32ShPool::DoCreate (iWin32MemoryBase = 0x%08x)", iWin32MemoryBase));

	iBufMap = TBitMapAllocator::New(aInfo.iInfo.iMaxBufs, (TBool)ETrue);
	if (iBufMap == NULL)
		{
		return KErrNoMemory;
		}

	return KErrNone;
	}
       
TBool DWin32ShPool::IsOpen(DProcess* /*aProcess*/)
	{
	// could we do some kind of check here?
	return (TBool)ETrue;
	}

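// UpdateFreeList() merges the unsorted alternative free list (iAltFreeList, used by
// Free() for grown buffers) back into the main free list, keeping the result ordered
// by ascending buffer offset. It works in batches of up to eight buffers: each batch
// is insertion-sorted into a temporary queue and then merged into iFreeList from the
// back, flashing the pool lock (NKern::FMFlash) so the pool is not held locked for
// long stretches.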
       
TInt DWin32ShPool::UpdateFreeList()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::UpdateFreeList"));

	SDblQue temp;
	SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&iFreeList);

	LockPool();
	while(!iAltFreeList.IsEmpty())
		{
		// sort a temporary list of 'n' objects with the lowest index first
		for (TInt n = 0; n < 8 && !iAltFreeList.IsEmpty(); ++n)
			{
			// bit of an assumption: let's assume that the lower indexes will be allocated and freed first
			// and therefore will be nearer the front of the list
			DShBuf* buf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);

			SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&temp);
			SDblQueLink* pLink = temp.Last();

			for (;;)
				{
				// traverse the list starting at the back
				if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
					{
					pLink = pLink->iPrev;
					}
				else
					{
					buf->iObjLink.InsertAfter(pLink);
					break;
					}
				}
			}

		// now merge with the free list
		while(!temp.IsEmpty())
			{
			if (iFreeList.IsEmpty())
				{
				iFreeList.MoveFrom(&temp);
				break;
				}

			// working backwards with the highest index
			DShBuf* buf = _LOFF(temp.Last(), DShBuf, iObjLink);
			SDblQueLink* pLink = iFreeList.Last();

			while (!NKern::FMFlash(&iLock))
				{
				if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
					{
					pLink = pLink->iPrev;
					}
				else
					{
					buf->iObjLink.Deque();
					buf->iObjLink.InsertAfter(pLink);
					// next buffer
					if (temp.IsEmpty())
						break;
					buf = _LOFF(temp.Last(), DShBuf, iObjLink);
					}
				}
			}
		NKern::FMFlash(&iLock);
		}
	UnlockPool();

	__KTRACE_OPT(KMMU, Kern::Printf("<DWin32ShPool::UpdateFreeList"));
	return KErrNone;
	}

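// Return a buffer to the pool. The buffer contents are poisoned (0xde) in debug builds
// or zeroed in release builds before the buffer is queued: initial buffers go to the
// head of the main free list, grown buffers to the unsorted alternative list (picked
// up later by UpdateFreeList()/ShrinkPool()).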
       
void DWin32ShPool::Free(DShBuf* aBuf)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::Free (aBuf = 0x%08x, aBuf->Base() 0x%08x)", aBuf, aBuf->Base()));

	TLinAddr newAddr = (TLinAddr)aBuf->Base();
#ifdef _DEBUG
	memset((TAny*)newAddr,0xde,aBuf->Size());
#else
	memclr((TAny*)newAddr,aBuf->Size());
#endif

	LockPool();
#ifdef _DEBUG
	// Remove from allocated list
	aBuf->iObjLink.Deque();
#endif
	// we want to put the initial buffers at the head of the free list
	// and the grown buffers at the tail as this makes shrinking more efficient
	if (aBuf >= iInitialBuffersArray && aBuf < (iInitialBuffersArray + iInitialBuffers))
		{
		iFreeList.AddHead(&aBuf->iObjLink);
		}
	else
		{
		iAltFreeList.Add(&aBuf->iObjLink);
		}

	++iFreeBuffers;
#ifdef _DEBUG
	--iAllocatedBuffers;
#endif
	iPoolFlags &= ~EShPoolSuppressShrink;		// Allow shrinking again, if it was blocked
	UnlockPool();

	// queue ManagementDfc which completes notifications as appropriate
	if (HaveWorkToDo())
		KickManagementDfc();

	Close(NULL); // decrement pool reference count
	}

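// Allocate a buffer for a kernel-side client: take the first buffer from the main free
// list, falling back to the alternative free list, and open a reference on the pool for
// the lifetime of the buffer. Returns KErrNoMemory if both lists are empty; growing the
// pool is left to the management DFC kicked at the end.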
       
// Kernel side API
TInt DWin32ShPool::Alloc(DShBuf*& aShBuf)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::Alloc (DShBuf)"));

	TInt r = KErrNoMemory;
	aShBuf = NULL;

	LockPool();

	if (!iFreeList.IsEmpty())
		{
		aShBuf = _LOFF(iFreeList.GetFirst(), DShBuf, iObjLink);
#ifdef _DEBUG
		iAllocated.Add(&aShBuf->iObjLink);
		iAllocatedBuffers++;
#endif
		--iFreeBuffers;
		Open(); // increment pool reference count
		r = KErrNone;
		}
	else
		{
		// try alternative free list
		if (!iAltFreeList.IsEmpty())
			{
			aShBuf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);
#ifdef _DEBUG
			iAllocated.Add(&aShBuf->iObjLink);
			iAllocatedBuffers++;
#endif
			--iFreeBuffers;
			Open(); // increment pool reference count
			r = KErrNone;
			}
		}

	UnlockPool();

	if (HaveWorkToDo())
		KickManagementDfc();

	__KTRACE_OPT(KMMU, Kern::Printf("<DWin32ShPool::Alloc return buf = 0x%08x", aShBuf));
	return r;
	}


DWin32AlignedShPool::DWin32AlignedShPool()
  : DWin32ShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32AlignedShPool::DWin32AlignedShPool"));
	}


DWin32AlignedShPool::~DWin32AlignedShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32AlignedShPool::~DWin32AlignedShPool"));
	}

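// Aligned-pool creation: after the base class has reserved the address range, commit
// the pages for the initial buffers. With EShPoolGuardPages each buffer commits
// iBufGap minus one page, leaving an uncommitted guard page between buffers; otherwise
// the initial buffers are committed as one contiguous run.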
       
TInt DWin32AlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
	{
	TInt r;
	// Create Chunk
	r = DWin32ShPool::DoCreate(aInfo);
	if (r != KErrNone)
		{
		return r;
		}

	if (iPoolFlags & EShPoolGuardPages)
		{
		TUint numOfBytes = iBufGap - MM::RamPageSize;
		iCommittedPages = MM::RoundToPageSize(iInitialBuffers * numOfBytes) >> MM::RamPageShift;

		for (TUint i = 0; i < iInitialBuffers; ++i)
			{
			TUint offset = iBufGap * i;

			MM::Wait();
			if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), numOfBytes, 0xFF, EFalse) != KErrNone)
				{
				MM::Signal();
				return KErrNoMemory;
				}
			iWin32MemorySize += numOfBytes;

			MM::Signal();
			}

		iMaxPages = MM::RoundToPageSize(aInfo.iInfo.iMaxBufs * numOfBytes) >> MM::RamPageShift;
		}
	else
		{
		// Make sure we give the caller the number of buffers they were expecting
		iCommittedPages = MM::RoundToPageSize(iInitialBuffers * iBufGap) >> MM::RamPageShift;
		MM::Wait();
		if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase), iCommittedPages << MM::RamPageShift, 0xFF, EFalse) != KErrNone)
			{
			MM::Signal();
			return KErrNoMemory;
			}
		iWin32MemorySize = iCommittedPages << MM::RamPageShift;

		MM::Signal();

		iMaxPages = MM::RoundToPageSize(aInfo.iInfo.iMaxBufs * iBufGap) >> MM::RamPageShift;
		}

	return r;
	}


TInt DWin32AlignedShPool::SetBufferWindow(DProcess* /*aProcess*/, TInt /*aWindowSize*/)
	{
	return KErrNone;
	}

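// Grow/shrink for the page-aligned pool. GrowPool() commits a whole number of pages per
// new buffer and creates the corresponding DWin32ShBuf objects; ShrinkPool() releases
// free buffers that were added by GrowPool() (never the initial buffers), working from
// the back of the free list. The grow/shrink amounts come from the fixed-point ratios
// iGrowByRatio/iShrinkByRatio via mult_fx248().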
       
TInt DWin32AlignedShPool::GrowPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32AlignedShPool::GrowPool()"));

	Kern::MutexWait(*iProcessLock);

	// How many bytes to commit for each new buffer (must be a whole number of pages)
	TUint bytes = (iPoolFlags & EShPoolGuardPages) ? iBufGap - MM::RamPageSize : iBufGap;

	__ASSERT_DEBUG(!(bytes % MM::RamPageSize), Kern::PanicCurrentThread(KLitDWin32AlignedShPool, __LINE__));

	TInt pages = bytes >> MM::RamPageShift;

	TUint32 headroom = iMaxBuffers - iTotalBuffers;

	// How many buffers to grow by?
	TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
	if (grow == 0)			// Handle round-to-zero
		grow = 1;
	if (grow > headroom)
		grow = headroom;

	TInt r = KErrNone;
	SDblQue temp;

	TUint i;
	for (i = 0; i < grow; ++i)
		{
		TInt offset = iBufMap->Alloc();

		if (offset < 0)
			{
			r = KErrNoMemory;
			break;
			}

		offset *= iBufGap;

		MM::Wait();
		if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes, 0xFF, EFalse) != KErrNone)
			{
			r = KErrNoMemory;
			}
		iWin32MemorySize += bytes;
		MM::Signal();

		if (r != KErrNone)
			{
			iBufMap->Free(offset / iBufGap);
			break;
			}

		DWin32ShBuf *buf = new DWin32ShBuf(this, offset);

		if (buf == NULL)
			{
			MM::Wait();
			MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes);
			iWin32MemorySize -= bytes;
			MM::Signal();
			iBufMap->Free(offset / iBufGap);
			r = KErrNoMemory;
			break;
			}

		r = buf->Construct();

		if (r != KErrNone)
			{
			MM::Wait();
			MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes);
			iWin32MemorySize -= bytes;
			MM::Signal();
			iBufMap->Free(offset / iBufGap);
			buf->DObject::Close(NULL);
			break;
			}

		iCommittedPages += pages;

		temp.Add(&buf->iObjLink);
		}

	r = UpdateReservedHandles(i);

	if (r == KErrNone)
		{
		LockPool();
		iFreeList.MoveFrom(&temp);
		iFreeBuffers += i;
		iTotalBuffers += i;
		UnlockPool();
		}
	else
		{
		// couldn't reserve handles, so delete the buffers we just created
		SDblQueLink *pLink;
		while ((pLink = temp.GetFirst()) != NULL)
			{
			DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
			TLinAddr offset = buf->iRelAddress;
			iBufMap->Free(offset / iBufGap);
			MM::Wait();
			MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes);
			iWin32MemorySize -= bytes;
			MM::Signal();
			iCommittedPages -= pages;
			buf->DObject::Close(NULL);
			}
		}

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DWin32AlignedShPool::GrowPool()"));
	return r;
	} // DWin32AlignedShPool::GrowPool
       
TInt DWin32AlignedShPool::ShrinkPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32AlignedShPool::ShrinkPool()"));

	Kern::MutexWait(*iProcessLock);

	// How many bytes to decommit for each buffer being released (must be a whole number of pages)
	TUint bytes = (iPoolFlags & EShPoolGuardPages) ? iBufGap - MM::RamPageSize : iBufGap;

	__ASSERT_DEBUG(!(bytes % MM::RamPageSize), Kern::PanicCurrentThread(KLitDWin32AlignedShPool, __LINE__));

	TInt pages = bytes >> MM::RamPageShift;

	// Grab pool stats
	TUint32 grownBy = iTotalBuffers - iInitialBuffers;

	// How many buffers to shrink by?
	TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
	if (shrink == 0)		// Handle round-to-zero
		shrink = 1;
	if (shrink > grownBy)
		shrink = grownBy;
	if (shrink > iFreeBuffers)
		shrink = iFreeBuffers;

	// work backwards
	TUint i;
	for (i = 0; i < shrink; ++i)
		{
		LockPool();
		if (iFreeList.IsEmpty())
			{
			UnlockPool();
			break;
			}
		// work from the back of the queue
		SDblQueLink *pLink = iFreeList.Last();

		DShBuf* pBuf = _LOFF(pLink, DShBuf, iObjLink);

		if (pBuf >= iInitialBuffersArray && pBuf < (iInitialBuffersArray + iInitialBuffers))
			{
			UnlockPool();
			break;
			}

		--iFreeBuffers;
		--iTotalBuffers;
		pLink->Deque();
		iCommittedPages -= pages;
		UnlockPool();

		TLinAddr offset = pBuf->iRelAddress;

		iBufMap->Free(offset / iBufGap);

		MM::Wait();
		// decommit the same amount that GrowPool() committed for this buffer
		MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes);
		iWin32MemorySize -= bytes;
		MM::Signal();
		pBuf->DObject::Close(NULL);
		}

	TInt r = UpdateReservedHandles(-(TInt)i);

	// If we couldn't shrink the pool by this many buffers, wait until we Free() another
	// buffer before trying to shrink again.
	if (i < shrink)
		iPoolFlags |= EShPoolSuppressShrink;

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DWin32AlignedShPool::ShrinkPool()"));
	return r;
	} // DWin32AlignedShPool::ShrinkPool
       
DWin32NonAlignedShPool::DWin32NonAlignedShPool()
  : DWin32ShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32NonAlignedShPool::DWin32NonAlignedShPool"));
	}


DWin32NonAlignedShPool::~DWin32NonAlignedShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32NonAlignedShPool::~DWin32NonAlignedShPool"));

	delete iPagesMap;
	}


TInt DWin32NonAlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
	{
	// Create Chunk
	TInt r;

	r = DWin32ShPool::DoCreate(aInfo);

	if (r != KErrNone)
		{
		return r;
		}

	if (iPoolFlags & EShPoolPhysicalMemoryPool)
		{
		return KErrNotSupported;
		}
	else
		{
		// Make sure we give the caller the number of buffers they were expecting
		iCommittedPages = MM::RoundToPageSize(iInitialBuffers * iBufGap) >> MM::RamPageShift;

		MM::Wait();
		if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase), iCommittedPages << MM::RamPageShift, 0xFF, EFalse) != KErrNone)
			{
			MM::Signal();
			return KErrNoMemory;
			}
		iWin32MemorySize = iCommittedPages << MM::RamPageShift;

		MM::Signal();
		iMaxPages = MM::RoundToPageSize(aInfo.iInfo.iMaxBufs * iBufGap) >> MM::RamPageShift;
		}

	iPagesMap = TBitMapAllocator::New(iMaxPages, (TBool)ETrue);

	if(!iPagesMap)
		{
		return KErrNoMemory;
		}

	iPagesMap->Alloc(0, iCommittedPages);
	return r;
	}

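// Decommit the pages backing the buffer at aOffset, but only those pages that are not
// shared with another still-allocated buffer: the first and last page of the buffer are
// skipped if iBufMap shows a neighbouring buffer still in use. Freed pages are returned
// to iPagesMap and the committed-page count is updated.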
       
void DWin32NonAlignedShPool::FreeBufferPages(TUint aOffset)
	{
	TLinAddr firstByte = aOffset;	// offset of first byte in buffer
	TLinAddr lastByte = firstByte+iBufGap-1;	// offset of last byte in buffer
	TUint firstPage = firstByte>>MM::RamPageShift;	// index of first page containing part of the buffer
	TUint lastPage = lastByte>>MM::RamPageShift;		// index of last page containing part of the buffer

	TUint firstBuffer = (firstByte&~(MM::RamPageSize - 1))/iBufGap; // index of first buffer which lies in firstPage
	TUint lastBuffer = (lastByte|(MM::RamPageSize - 1))/iBufGap;    // index of last buffer which lies in lastPage
	TUint thisBuffer = firstByte/iBufGap;				// index of the buffer to be freed

	// Ensure lastBuffer is within bounds (there may be room in the last
	// page for more buffers than we have allocated).
	if (lastBuffer >= iMaxBuffers)
		lastBuffer = iMaxBuffers-1;

	if(firstBuffer!=thisBuffer && iBufMap->NotFree(firstBuffer,thisBuffer-firstBuffer))
		{
		// first page has other allocated buffers in it,
		// so we can't free it and must move on to next one...
		if (firstPage >= lastPage)
			return;
		++firstPage;
		}

	if(lastBuffer!=thisBuffer && iBufMap->NotFree(thisBuffer+1,lastBuffer-thisBuffer))
		{
		// last page has other allocated buffers in it,
		// so we can't free it and must step back to previous one...
		if (lastPage <= firstPage)
			return;
		--lastPage;
		}

	if(firstPage<=lastPage)
		{
		// we can free pages firstPage through to lastPage...
		TUint numPages = lastPage-firstPage+1;
		iPagesMap->SelectiveFree(firstPage,numPages);
		MM::Wait();
		MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+(firstPage << MM::RamPageShift)), (numPages << MM::RamPageShift));
		iWin32MemorySize -= (numPages << MM::RamPageShift);
		MM::Signal();
		iCommittedPages -= numPages;
		}
	}

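// Grow/shrink for the non-aligned pool. Because buffers may straddle page boundaries and
// share pages with their neighbours, GrowPool() commits only those pages of a new buffer
// that are not already committed (tracked in iPagesMap), and ShrinkPool() relies on
// FreeBufferPages() to decommit only pages no longer used by any buffer.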
       
TInt DWin32NonAlignedShPool::GrowPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32NonAlignedShPool::GrowPool()"));

	Kern::MutexWait(*iProcessLock);

	TUint32 headroom = iMaxBuffers - iTotalBuffers;

	// How many buffers to grow by?
	TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
	if (grow == 0)			// Handle round-to-zero
		grow = 1;
	if (grow > headroom)
		grow = headroom;

	TInt r = KErrNone;
	SDblQue temp;

	TUint i;
	for (i = 0; i < grow; ++i)
		{
		TInt offset = iBufMap->Alloc();

		if (offset < 0)
			{
			r = KErrNoMemory;
			break;
			}

		offset *= iBufGap;

		TInt lastPage = (offset + iBufSize - 1) >> MM::RamPageShift;

		// Allocate one page at a time.
		for (TInt page = offset >> MM::RamPageShift; page <= lastPage; ++page)
			{
			// Commit the page if it is not already committed.
			if (iPagesMap->NotAllocated(page, 1))
				{
				MM::Wait();
				if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+(page << MM::RamPageShift)), MM::RamPageSize, 0xFF, EFalse) != KErrNone)
					{
					MM::Signal();
					r = KErrNoMemory;
					break;
					}
				iWin32MemorySize += MM::RamPageSize;

				MM::Signal();
				++iCommittedPages;
				iPagesMap->Alloc(page, 1);
				}
			}

		if (r != KErrNone)
			{
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			break;
			}

		DWin32ShBuf *buf = new DWin32ShBuf(this, offset);

		if (buf == NULL)
			{
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			r = KErrNoMemory;
			break;
			}

		r = buf->Construct();

		if (r != KErrNone)
			{
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			buf->DObject::Close(NULL);
			break;
			}

		temp.Add(&buf->iObjLink);
		}

	r = UpdateReservedHandles(i);

	if (r == KErrNone)
		{
		LockPool();
		iFreeList.MoveFrom(&temp);
		iFreeBuffers += i;
		iTotalBuffers += i;
		UnlockPool();
		}
	else
		{
		// couldn't reserve handles so have no choice but to
		// delete the buffers
		__KTRACE_OPT(KMMU, Kern::Printf("GrowPool failed with %d, deleting buffers", r));
		SDblQueLink *pLink;
		while ((pLink = temp.GetFirst()) != NULL)
			{
			DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
			TLinAddr offset = buf->iRelAddress;
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			buf->DObject::Close(NULL);
			}
		__KTRACE_OPT(KMMU, Kern::Printf("Buffers deleted"));
		}

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DWin32NonAlignedShPool::GrowPool()"));
	return r;
	} // DWin32NonAlignedShPool::GrowPool
       
TInt DWin32NonAlignedShPool::ShrinkPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32NonAlignedShPool::ShrinkPool()"));

	Kern::MutexWait(*iProcessLock);

	// Grab pool stats
	TUint32 grownBy = iTotalBuffers - iInitialBuffers;

	// How many buffers to shrink by?
	TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
	if (shrink == 0)		// Handle round-to-zero
		shrink = 1;
	if (shrink > grownBy)
		shrink = grownBy;
	if (shrink > iFreeBuffers)
		shrink = iFreeBuffers;

	TUint i;
	for (i = 0; i < shrink; ++i)
		{
		LockPool();
		if (iFreeList.IsEmpty())
			{
			UnlockPool();
			break;
			}
		// work from the back of the queue
		SDblQueLink *pLink = iFreeList.Last();

		DShBuf* pBuf = _LOFF(pLink, DShBuf, iObjLink);

		if (pBuf >= iInitialBuffersArray && pBuf < (iInitialBuffersArray + iInitialBuffers))
			{
			UnlockPool();
			break;
			}

		--iFreeBuffers;
		--iTotalBuffers;
		pLink->Deque();
		UnlockPool();

		TLinAddr offset = pBuf->iRelAddress;

		iBufMap->Free(offset / iBufGap);
		FreeBufferPages(offset);
		pBuf->DObject::Close(NULL);
		}

	UpdateReservedHandles(-(TInt)i);

	// If we couldn't shrink the pool by this many buffers, wait until we Free() another
	// buffer before trying to shrink again.
	if (i < shrink)
		iPoolFlags |= EShPoolSuppressShrink;

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DWin32NonAlignedShPool::ShrinkPool()"));

	return KErrNone;
	} // DWin32NonAlignedShPool::ShrinkPool