kernel/eka/memmodel/emul/win32/mchunk.cpp
changeset 0 a41df078684a
equal deleted inserted replaced
-1:000000000000 0:a41df078684a
       
     1 // Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
       
     2 // All rights reserved.
       
     3 // This component and the accompanying materials are made available
       
     4 // under the terms of the License "Eclipse Public License v1.0"
       
     5 // which accompanies this distribution, and is available
       
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     7 //
       
     8 // Initial Contributors:
       
     9 // Nokia Corporation - initial contribution.
       
    10 //
       
    11 // Contributors:
       
    12 //
       
    13 // Description:
       
    14 // e32\memmodel\emul\win32\mchunk.cpp
       
    15 // 
       
    16 //
       
    17 
       
    18 #include "memmodel.h"
       
    19 #include <emulator.h>
       
    20 
       
DWin32Chunk::~DWin32Chunk()
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("DWin32Chunk destruct %O",this));

	if (iBase)
		{
		// Decommit any committed pages, then release the whole reserved
		// address range back to Windows.
		VirtualFree(LPVOID(iBase), iMaxSize, MEM_DECOMMIT);
		VirtualFree(LPVOID(iBase), 0, MEM_RELEASE);
		MM::Wait();
		MM::FreeMemory += iSize;
		if(iUnlockedPageBitMap)
			{
			// Allocated bits in the unlocked-page bitmap are pages this chunk
			// had donated to the cache; remove them from the cache counters.
			TInt unlockedMemory = MM::RamPageSize*(iUnlockedPageBitMap->iSize-iUnlockedPageBitMap->iAvail);
			if(unlockedMemory<=MM::CacheMemory)
				MM::CacheMemory-=unlockedMemory;
			else
				{
				// Some of the unlocked memory had already been reclaimed;
				// account the excess against the reclaimed counter.
				MM::ReclaimedCacheMemory -= unlockedMemory-MM::CacheMemory;
				MM::CacheMemory = 0;
				}
			MM::CheckMemoryCounters();
			}
		MM::Signal();
		}
	__KTRACE_OPT(KMEMTRACE, {MM::Wait();Kern::Printf("MT:D %d %x %O",NTickCount(),this,this);MM::Signal();});
#ifdef BTRACE_CHUNKS
	BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
	delete iPageBitMap;
	delete iUnlockedPageBitMap;
	delete iPermanentPageBitMap;

	// Atomically claim any registered destruction-notification DFC and queue it.
	TDfc* dfc = (TDfc*)__e32_atomic_swp_ord_ptr(&iDestroyedDfc, 0);
	if (dfc)
		dfc->Enque();
	}
       
    57 
       
    58 
       
TUint8* DWin32Chunk::Base(DProcess* /*aProcess*/)
	{
	// On the emulator all processes share a single host address space,
	// so the chunk base is the same regardless of the asking process.
	return iBase;
	}
       
    63 
       
    64 
       
TInt DWin32Chunk::DoCreate(SChunkCreateInfo& aInfo)
	{
	// Second-phase construction: allocate the page bitmaps required for this
	// chunk type and reserve (without committing) the address range.
	__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));

	if(iAttributes&EMemoryNotOwned)
		return KErrNotSupported;	// 'memory not owned' chunks aren't supported on the emulator
	if (aInfo.iMaxSize<=0)
		return KErrArgument;
	iMaxSize=MM::RoundToChunkSize(aInfo.iMaxSize);
	TInt maxpages=iMaxSize>>MM::RamPageShift;
	if (iAttributes & EDisconnected)
		{
		// Disconnected chunks track which pages are committed (iPageBitMap)
		// and which committed pages are currently unlocked into the cache
		// (iUnlockedPageBitMap).
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		TBitMapAllocator* pUM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pUM)
			{
			delete pM;
			return KErrNoMemory;
			}
		iPageBitMap=pM;
		iUnlockedPageBitMap=pUM;
		__KTRACE_OPT(KMMU,Kern::Printf("PageBitMap at %08x, MaxPages %d",pM,maxpages));
		}
	switch (iChunkType)
		{
	case ESharedKernelSingle:
	case ESharedKernelMultiple:
		{
		// Shared kernel chunks also record which pages have ever been
		// committed; Address()/PhysicalAddress() consult this bitmap.
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		iPermanentPageBitMap = pM;
		}
		// fall through to next case...
	case ESharedIo:
	case EKernelMessage:
	case EUserSelfModCode:
	case EUserData:
		{
		// Reserve the whole range now; pages are committed later on demand.
		// Self-modifying-code chunks need execute permission.
		DWORD protect = (iChunkType == EUserSelfModCode) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
		LPVOID base = VirtualAlloc(NULL, iMaxSize, MEM_RESERVE, protect);
		if (!base)
			return KErrNoMemory;
		iBase  = (TUint8*) base;
		__KTRACE_OPT(KMMU,Kern::Printf("Reserved: Base=%08x, Size=%08x",iBase,iMaxSize));
		}
		break;
	default:
		break;
		}
	__KTRACE_OPT(KMEMTRACE, {MM::Wait();Kern::Printf("MT:C %d %x %O",NTickCount(),this,this);MM::Signal();});
#ifdef BTRACE_CHUNKS
	TKName nameBuf;
	Name(nameBuf);
	BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
	if(iOwningProcess)
		BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
	BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
	return KErrNone;
	}
       
   128 
       
   129 TInt DWin32Chunk::Adjust(TInt aNewSize)
       
   130 //
       
   131 // Adjust a standard chunk.
       
   132 //
       
   133 	{
       
   134 
       
   135 	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Adjust %08x",aNewSize));
       
   136 	if (iAttributes & (EDoubleEnded|EDisconnected))
       
   137 		return KErrGeneral;
       
   138 	if (aNewSize<0 || aNewSize>iMaxSize)
       
   139 		return KErrArgument;
       
   140 
       
   141 	TInt r=KErrNone;
       
   142 	TInt newSize=MM::RoundToPageSize(aNewSize);
       
   143 	if (newSize!=iSize)
       
   144 		{
       
   145 		MM::Wait();
       
   146 		if (newSize>iSize)
       
   147 			{
       
   148 			__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Adjust growing"));
       
   149 			r=DoCommit(iSize,newSize-iSize);
       
   150 			}
       
   151 		else if (newSize<iSize)
       
   152 			{
       
   153 			__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Adjust shrinking"));
       
   154 			DoDecommit(newSize,iSize-newSize);
       
   155 			}
       
   156 		MM::Signal();
       
   157 		}
       
   158 
       
   159 	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
       
   160 	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk %O adjusted to %x",this,iSize));
       
   161 	return r;
       
   162 	}
       
   163 
       
TInt DWin32Chunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
//
// Adjust a double-ended chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
		return KErrGeneral;
	if (0>aBottom || aBottom>aTop || aTop>iMaxSize)
		return KErrArgument;
	// Expand the requested region to whole pages: bottom rounded down,
	// top rounded up.
	aBottom &= ~(MM::RamPageSize-1);
	aTop = MM::RoundToPageSize(aTop);
	TInt newSize=aTop-aBottom;

	MM::Wait();
	TInt initBottom=iStartPos;
	TInt initTop=iStartPos+iSize;
	TInt nBottom=Max(aBottom,initBottom);	// intersection bottom
	TInt nTop=Min(aTop,initTop);	// intersection top
	TInt r=KErrNone;
	if (nBottom<nTop)
		{
		// The old and new regions overlap: decommit the parts of the old
		// region outside the new one, then commit the parts of the new
		// region not previously present.
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions intersect"));
		if (initBottom<nBottom)
			{
			iStartPos=aBottom;
			DoDecommit(initBottom,nBottom-initBottom);
			}
		if (initTop>nTop)
			DoDecommit(nTop,initTop-nTop);	// this changes iSize
		if (aBottom<nBottom)
			{
			// Grow downwards first; only update iStartPos once the whole
			// grow has succeeded so a failure leaves a consistent region.
			r=DoCommit(aBottom,nBottom-aBottom);
			if (r==KErrNone)
				{
				if (aTop>nTop)
					r=DoCommit(nTop,aTop-nTop);
				if (r==KErrNone)
					iStartPos=aBottom;
				else
					DoDecommit(aBottom,nBottom-aBottom);	// undo the downward grow
				}
			}
		else if (aTop>nTop)
			r=DoCommit(nTop,aTop-nTop);
		}
	else
		{
		// No overlap: throw away the whole old region and commit the new one.
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions disjoint"));
		if (iSize)
			DoDecommit(initBottom,iSize);
		iStartPos=aBottom;
		if (newSize)
			r=DoCommit(iStartPos,newSize);
		}
	MM::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk %O adjusted to %x+%x",this,iStartPos,iSize));
	return r;
	}
       
   224 
       
   225 
       
TInt DWin32Chunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
//
// Commit to a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
		return KErrArgument;
	if(LOGICAL_XOR((TInt)aCommitType&DChunk::ECommitPhysicalMask, iAttributes&DChunk::EMemoryNotOwned))
		return KErrNotSupported;  // Commit type doesn't match 'memory owned' type

	// Round the requested region out to whole pages.
	TInt top = MM::RoundToPageSize(aOffset + aSize);
	aOffset &= ~(MM::RamPageSize - 1);
	aSize = top - aOffset;

	TInt r=KErrNone;
	TInt i=aOffset>>MM::RamPageShift;
	TInt n=aSize>>MM::RamPageShift;
	MM::Wait();
	if (iPageBitMap->NotFree(i,n))
		r=KErrAlreadyExists;	// some page in the range is already committed
	else
		{
		switch(aCommitType)
			{
		case DChunk::ECommitDiscontiguous:
			if(aExtraArg==0)
				r=DoCommit(aOffset,aSize);
			else
				r = KErrArgument;	// no extra argument expected for this type
			break;

		case DChunk::ECommitContiguous:
			r=DoCommit(aOffset,aSize);
			 // Return a fake physical address which is == linear address
			 // NOTE(review): assumes aExtraArg is non-NULL for this commit
			 // type - presumably guaranteed by the caller; confirm.
			if(r==KErrNone)
				*aExtraArg = (TUint)(iBase+aOffset);
			break;

		case DChunk::ECommitDiscontiguousPhysical:
		case DChunk::ECommitContiguousPhysical:
			// The emulator doesn't do physical address allocation
			r=KErrNotSupported;
			break;

		default:
			r = KErrArgument;
			break;
			};
		// Only mark the pages allocated once the commit has succeeded.
		if (r==KErrNone)
			iPageBitMap->Alloc(i,n);
		}
	MM::CheckMemoryCounters();
	MM::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}
       
   285 
       
   286 TInt DWin32Chunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
       
   287 //
       
   288 // Allocate offset and commit to a disconnected chunk.
       
   289 //
       
   290 	{
       
   291 	(void)aAlign;
       
   292 	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Allocate %x %x %d",aSize,aGuard,aAlign));
       
   293 	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
       
   294 		return KErrGeneral;
       
   295 	if (aSize<=0 || aGuard<0 || aSize+aGuard>iMaxSize)
       
   296 		return KErrArgument;
       
   297 
       
   298 	aSize = MM::RoundToPageSize(aSize);
       
   299 	aGuard = MM::RoundToPageSize(aGuard);
       
   300 
       
   301 	TInt r=KErrNone;
       
   302 	TInt n=(aSize+aGuard)>>MM::RamPageShift;
       
   303 	MM::Wait();
       
   304 	TInt i=iPageBitMap->AllocConsecutive(n,EFalse);		// allocate the offset
       
   305 	if (i<0)
       
   306 		r=KErrNoMemory;		// run out of reserved space for this chunk
       
   307 	else
       
   308 		{
       
   309 		TInt offset=i<<MM::RamPageShift;
       
   310 		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x allocated",offset));
       
   311 		r=DoCommit(offset+aGuard,aSize);
       
   312 		if (r==KErrNone)
       
   313 			{
       
   314 			iPageBitMap->Alloc(i,n);
       
   315 			r=offset;		// if operation successful, return allocated offset
       
   316 			}
       
   317 		}
       
   318 	MM::Signal();
       
   319 	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Allocate returns %x",r));
       
   320 	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
       
   321 	return r;
       
   322 	}
       
   323 
       
TInt DWin32Chunk::Decommit(TInt anOffset, TInt aSize)
//
// Decommit from a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Decommit %x+%x",anOffset,aSize));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
		return KErrArgument;
	
	// Round the requested region out to whole pages.
	TInt top = MM::RoundToPageSize(anOffset + aSize);
	anOffset &= ~(MM::RamPageSize - 1);
	aSize = top - anOffset;

	MM::Wait();

	__KTRACE_OPT(KMMU,Kern::Printf("Rounded and Clipped range %x+%x",anOffset,aSize));

	TInt i=anOffset>>MM::RamPageShift;
	TInt n=aSize>>MM::RamPageShift;
	// check for decommiting unlocked pages...
	for(TInt j=i; j<i+n; j++)
		{
		if(iUnlockedPageBitMap->NotFree(j,1))
			{
			// Page was donated to the cache; take it back out of the cache
			// accounting before it is decommitted.
			iUnlockedPageBitMap->Free(j);
			if(MM::ReclaimedCacheMemory)
				{
				MM::ReclaimedCacheMemory -= MM::RamPageSize;
				MM::FreeMemory -= MM::RamPageSize; // reclaimed memory already counted, so adjust
				}
			else
				MM::CacheMemory -= MM::RamPageSize;
			}
		}
	__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
	iPageBitMap->SelectiveFree(i,n);	// free those positions which are actually allocated
	DoDecommit(anOffset,aSize);
	MM::CheckMemoryCounters();
	MM::Signal();
	__DEBUG_EVENT(EEventUpdateChunk, this);
	return KErrNone;
	}
       
   369 
       
TInt DWin32Chunk::Unlock(TInt aOffset, TInt aSize)
	{
	// Donate committed pages to the memory cache: the pages stay committed
	// but become eligible for reclaiming when memory is short.
	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Unlock %x+%x",aOffset,aSize));
	if (!(iAttributes&ECache))
		return KErrGeneral;	// only cache chunks support unlocking
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
		return KErrArgument;
	
	// Round the requested region out to whole pages.
	TInt top = MM::RoundToPageSize(aOffset + aSize);
	aOffset &= ~(MM::RamPageSize - 1);
	aSize = top - aOffset;

	MM::Wait();

	TInt i=aOffset>>MM::RamPageShift;
	TInt n=aSize>>MM::RamPageShift;
	TInt r;
	if (iPageBitMap->NotAllocated(i,n))
		r=KErrNotFound; // some pages aren't committed
	else
		{
		for(TInt j=i; j<i+n; j++)
			{
			if(iUnlockedPageBitMap->NotAllocated(j,1))
				{
				// unlock this page...
				iUnlockedPageBitMap->Alloc(j,1);
				MM::CacheMemory += MM::RamPageSize;
				}
			}
		r = KErrNone;
		}

	MM::CheckMemoryCounters();
	MM::Signal();
	return r;
	}
       
   409 
       
TInt DWin32Chunk::Lock(TInt aOffset, TInt aSize)
	{
	// Reclaim previously unlocked pages from the memory cache. If any page
	// in the range has already been reclaimed (its contents are gone), the
	// lock fails and the whole range is decommitted.
	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Lock %x+%x",aOffset,aSize));
	if (!(iAttributes&ECache))
		return KErrGeneral;	// only cache chunks support locking
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
		return KErrArgument;
	
	// Round the requested region out to whole pages.
	TInt top = MM::RoundToPageSize(aOffset + aSize);
	aOffset &= ~(MM::RamPageSize - 1);
	aSize = top - aOffset;

	MM::Wait();

	TInt i=aOffset>>MM::RamPageShift;
	TInt n=aSize>>MM::RamPageShift;
	TInt r;
	if (iPageBitMap->NotAllocated(i,n))
		r=KErrNotFound; // some pages aren't committed
	else
		{
		r = KErrNone;
		for(TInt j=i; j<i+n; j++)
			{
			if(iUnlockedPageBitMap->NotFree(j,1))
				{
				// lock this page...
				// NOTE(review): any non-zero ReclaimedCacheMemory appears to
				// be treated as "this page may have been reclaimed" - the
				// lock is abandoned conservatively; confirm intended.
				if(MM::ReclaimedCacheMemory)
					{
					r = KErrNotFound;
					break;
					}
				iUnlockedPageBitMap->Free(j);
				MM::CacheMemory -= MM::RamPageSize;
				}
			}
		}
	if(r!=KErrNone)
		{
		// decommit memory on error...
		// (the failed range's contents can no longer be trusted, so remove
		// it entirely: undo the cache accounting, free the bitmap bits and
		// decommit the pages)
		for(TInt j=i; j<i+n; j++)
			{
			if(iUnlockedPageBitMap->NotFree(j,1))
				{
				iUnlockedPageBitMap->Free(j);
				if(MM::ReclaimedCacheMemory)
					{
					MM::ReclaimedCacheMemory -= MM::RamPageSize;
					MM::FreeMemory -= MM::RamPageSize; // reclaimed memory already counted, so adjust
					}
				else
					MM::CacheMemory -= MM::RamPageSize;
				}
			}
		iPageBitMap->SelectiveFree(i,n);
		DoDecommit(aOffset,aSize);
		}
	MM::CheckMemoryCounters();
	MM::Signal();
	return r;
	}
       
   473 
       
   474 TInt DWin32Chunk::CheckAccess()
       
   475 	{
       
   476 	DProcess* pP=TheCurrentThread->iOwningProcess;
       
   477 	if (iAttributes&EPrivate)
       
   478 		{
       
   479 		if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
       
   480 			return KErrAccessDenied;
       
   481 		}
       
   482 	return KErrNone;
       
   483 	}
       
   484 
       
   485 TInt DWin32Chunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
       
   486 	{
       
   487 	if(!iPermanentPageBitMap)
       
   488 		return KErrAccessDenied;
       
   489 	if(TUint(aOffset)>=TUint(iMaxSize))
       
   490 		return KErrArgument;
       
   491 	if(TUint(aOffset+aSize)>TUint(iMaxSize))
       
   492 		return KErrArgument;
       
   493 	if(aSize<=0)
       
   494 		return KErrArgument;
       
   495 	TInt pageShift = MM::RamPageShift;
       
   496 	TInt start = aOffset>>pageShift;
       
   497 	TInt size = ((aOffset+aSize-1)>>pageShift)-start+1;
       
   498 	if(iPermanentPageBitMap->NotAllocated(start,size))
       
   499 		return KErrNotFound;
       
   500 	aKernelAddress = (TLinAddr)iBase+aOffset;
       
   501 	return KErrNone;
       
   502 	}
       
   503 
       
void DWin32Chunk::Substitute(TInt /*aOffset*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/)
	{
	// Physical page substitution has no meaning on the emulator (there are
	// no real physical pages), so this is always a fault.
	MM::Panic(MM::ENotSupportedOnEmulator);
	}
       
   508 
       
   509 TInt DWin32Chunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
       
   510 	{
       
   511 	TInt r=Address(aOffset,aSize,aKernelAddress);
       
   512 	if(r!=KErrNone)
       
   513 		return r;
       
   514 
       
   515 	// return fake physical addresses which are the same as the linear address
       
   516 	aPhysicalAddress = 	aKernelAddress;
       
   517 
       
   518 	TInt pageShift = MM::RamPageShift;
       
   519 	TUint32 page = aKernelAddress>>pageShift<<pageShift;
       
   520 	TUint32 lastPage = (aKernelAddress+aSize-1)>>pageShift<<pageShift;
       
   521 	TUint32* pageList = aPhysicalPageList;
       
   522 	TUint32 pageSize = 1<<pageShift;
       
   523 	if(pageList)
       
   524 		for(; page<=lastPage; page += pageSize)
       
   525 			*pageList++ = page;
       
   526 	return KErrNone;
       
   527 	}
       
   528 
       
TInt DWin32Chunk::DoCommit(TInt aOffset, TInt aSize)
//
// Get win32 to commit the pages.
// We know they are not already committed - this is guaranteed by the caller so we can update the memory info easily
//
	{
	if (aSize==0)
		return KErrNone;

	// Self-modifying-code chunks need their pages committed executable.
	TBool execute = (iChunkType == EUserSelfModCode) ? ETrue : EFalse;

	TInt r = MM::Commit(reinterpret_cast<TLinAddr>(iBase + aOffset), aSize, iClearByte, execute);

	if (r == KErrNone)
		{
		iSize += aSize;

		// For shared kernel chunks, record that these pages have been
		// committed so Address()/PhysicalAddress() will accept them.
		if(iPermanentPageBitMap)
	        iPermanentPageBitMap->Alloc(aOffset>>MM::RamPageShift,aSize>>MM::RamPageShift);

		__KTRACE_OPT(KMEMTRACE, {Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);});
#ifdef BTRACE_CHUNKS
		BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryAllocated,this,aOffset,aSize);
#endif
		return KErrNone;
		}

	// Any failure from MM::Commit is reported as out-of-memory.
	return KErrNoMemory;
	}
       
   558 
       
   559 void DWin32Chunk::DoDecommit(TInt anOffset, TInt aSize)
       
   560 //
       
   561 // Get win32 to decommit the pages.
       
   562 // The pages may or may not be committed: we need to find out which ones are so that the memory info is updated correctly
       
   563 //
       
   564 	{
       
   565 	TInt freed = MM::Decommit(reinterpret_cast<TLinAddr>(iBase+anOffset), aSize);
       
   566 
       
   567 	iSize -= freed;
       
   568 	__KTRACE_OPT(KMEMTRACE, {Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);});
       
   569 	}
       
   570 
       
   571 TUint32 MM::RoundToChunkSize(TUint32 aSize)
       
   572 	{
       
   573 	TUint32 m=MM::RamChunkSize-1;
       
   574 	return (aSize+m)&~m;
       
   575 	}
       
   576 
       
void DWin32Chunk::BTracePrime(TInt aCategory)
	{
	// Emit the base-class traces first, then (re-)emit allocation traces for
	// all memory currently committed to this chunk.
	DChunk::BTracePrime(aCategory);
	
#ifdef BTRACE_CHUNKS
	if (aCategory == BTrace::EChunks || aCategory == -1)
		{
		MM::Wait();
		// it is essential that the following code is in braces because __LOCK_HOST
		// creates an object which must be destroyed before the MM::Signal() at the end.
			{
			__LOCK_HOST;
			// output traces for all memory which has been committed to the chunk...
			// Walk the reserved range region-by-region using VirtualQuery.
			TInt offset=0;
			while(offset<iMaxSize)
				{
				MEMORY_BASIC_INFORMATION info;
				VirtualQuery(LPVOID(iBase + offset), &info, sizeof(info));
				// Clip the region so we never report past the chunk's end.
				TUint size = Min(iMaxSize-offset, info.RegionSize);
				if(info.State == MEM_COMMIT)
					BTrace12(BTrace::EChunks, BTrace::EChunkMemoryAllocated,this,offset,size);
				offset += size;
				}
			}
			MM::Signal();
		}
#endif
	}