kernel/eka/memmodel/epoc/flexible/mmu/mobject.cpp
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "mm.h"
#include "mmu.h"

#include "mobject.h"
#include "mmapping.h"
#include "mptalloc.h"
#include "mmanager.h"
#include "cache_maintenance.inl"

const TUint KMaxMappingsInOneGo = KMaxPageInfoUpdatesInOneGo; // must be power-of-2



//
// MemoryObjectLock
//

/**
The mutex pool used to assign locks to memory objects.
@see #MemoryObjectLock.
*/
DMutexPool MemoryObjectMutexPool;

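/**
Acquire the lock for aMemory. The lock is a DMutex assigned on demand from
MemoryObjectMutexPool via the object's iLock member, so distinct memory
objects may share the same underlying mutex.
*/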
void MemoryObjectLock::Lock(DMemoryObject* aMemory)
	{
	TRACE2(("MemoryObjectLock::Lock(0x%08x) try",aMemory));
	MemoryObjectMutexPool.Wait(aMemory->iLock);
	TRACE2(("MemoryObjectLock::Lock(0x%08x) acquired",aMemory));
	}

void MemoryObjectLock::Unlock(DMemoryObject* aMemory)
	{
	TRACE2(("MemoryObjectLock::Unlock(0x%08x)",aMemory));
	MemoryObjectMutexPool.Signal(aMemory->iLock);
	}

TBool MemoryObjectLock::IsHeld(DMemoryObject* aMemory)
	{
	return MemoryObjectMutexPool.IsHeld(aMemory->iLock);
	}



//
// DMemoryObject
//

DMemoryObject::DMemoryObject(DMemoryManager* aManager, TUint aFlags, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: iManager(aManager), iFlags(aFlags), iAttributes(Mmu::CanonicalMemoryAttributes(aAttributes)),
	  iSizeInPages(aSizeInPages)
	{
	__ASSERT_COMPILE(EMemoryAttributeMask<0x100); // make sure aAttributes fits into a TUint8

	TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);
	iRamAllocFlags = type;
	if(aCreateFlags&EMemoryCreateNoWipe)
		iRamAllocFlags |= Mmu::EAllocNoWipe;
	else if(aCreateFlags&EMemoryCreateUseCustomWipeByte)
		{
		TUint8 wipeByte = (aCreateFlags>>EMemoryCreateWipeByteShift)&0xff;
		iRamAllocFlags |= wipeByte<<Mmu::EAllocWipeByteShift;
		iRamAllocFlags |= Mmu::EAllocUseCustomWipeByte;
		}

	if(aCreateFlags&EMemoryCreateDemandPaged)
		iFlags |= EDemandPaged;
	if(aCreateFlags&EMemoryCreateReserveAllResources)
		iFlags |= EReserveResources;
	if(aCreateFlags&EMemoryCreateDisallowPinning)
		iFlags |= EDenyPinning;
	if(aCreateFlags&EMemoryCreateReadOnly)
		iFlags |= EDenyWriteMappings;
	if(!(aCreateFlags&EMemoryCreateAllowExecution))
		iFlags |= EDenyExecuteMappings;
	}


TInt DMemoryObject::Construct()
	{
	TBool preAllocateMemory = iFlags&(EReserveResources|EDemandPaged);
	TInt r = iPages.Construct(iSizeInPages,preAllocateMemory);
	return r;
	}


DMemoryObject::~DMemoryObject()
	{
	TRACE(("DMemoryObject[0x%08x]::~DMemoryObject()",this));
	__NK_ASSERT_DEBUG(iMappings.IsEmpty());
	}

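/**
Check that the region starting at page index aIndex and spanning aCount pages
lies wholly within this memory object. The first comparison (end>=aIndex)
rejects regions whose end wraps past the top of the unsigned range, e.g.
aIndex==0xfffffff0 with aCount==0x20.
*/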
TBool DMemoryObject::CheckRegion(TUint aIndex, TUint aCount)
	{
	TUint end = aIndex+aCount;
	return end>=aIndex && end<=iSizeInPages;
	}


void DMemoryObject::ClipRegion(TUint& aIndex, TUint& aCount)
	{
	TUint end = aIndex+aCount;
	if(end<aIndex) // overflow?
		end = ~0u;
	if(end>iSizeInPages)
		end = iSizeInPages;
	if(aIndex>=end)
		aIndex = end;
	aCount = end-aIndex;
	}

void DMemoryObject::SetLock(DMutex* aLock)
	{
	__NK_ASSERT_DEBUG(!iLock);
	iLock = aLock;
	TRACE(("DMemoryObject[0x%08x]::SetLock(0x%08x) \"%O\"",this,aLock,aLock));
	}

DMemoryMapping* DMemoryObject::CreateMapping(TUint, TUint)
	{
	return new DFineMapping();
	}

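/**
Apply a set of newly committed pages to every mapping attached to this memory
object. The mapping list lock is released around each call to
DMemoryMapping::MapPages, so MapInstanceCount is sampled first and passed on;
the mapping uses it to detect whether it was reused in the interim. Pinned
mappings are skipped as their pages never change whilst pinned.
*/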
TInt DMemoryObject::MapPages(RPageArray::TIter aPages)
	{
	TRACE2(("DMemoryObject[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

	TUint offset = aPages.Index();
	TUint offsetEnd = aPages.IndexEnd();
	TInt r = KErrNone;

	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		if(mapping->IsPinned())
			{
			// pinned mappings don't change, so nothing to do...
			iMappings.Unlock();
			}
		else
			{
			// get region where pages overlap the mapping...
			TUint start = mapping->iStartIndex;
			TUint end = start+mapping->iSizeInPages;
			if(start<offset)
				start = offset;
			if(end>offsetEnd)
				end = offsetEnd;
			if(start>=end)
				{
				// the mapping doesn't contain the pages...
				iMappings.Unlock();
				}
			else
				{
				// map pages in the mapping...
				mapping->Open();
				TUint mapInstanceCount = mapping->MapInstanceCount();
				iMappings.Unlock();
				r = mapping->MapPages(aPages.Slice(start,end),mapInstanceCount);
				mapping->AsyncClose();
				if(r!=KErrNone)
					{
					iMappings.Lock();
					break;
					}
				}
			}
		iMappings.Lock();
		mapping = iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();

	return r;
	}

void DMemoryObject::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TRACE2(("DMemoryObject[0x%08x]::RemapPage(0x%x,%d,%d)",this,aPageArray,aIndex,aInvalidateTLB));

	iMappings.RemapPage(aPageArray, aIndex, aInvalidateTLB);

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	if (aInvalidateTLB)
		InvalidateTLB();
#endif
	}


void DMemoryObject::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	TRACE2(("DMemoryObject[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));

	TUint offset = aPages.Index();
	TUint offsetEnd = aPages.IndexEnd();
	if(offset==offsetEnd)
		return;

	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		// get region where pages overlap the mapping...
		TUint start = mapping->iStartIndex;
		TUint end = start+mapping->iSizeInPages;
		if(start<offset)
			start = offset;
		if(end>offsetEnd)
			end = offsetEnd;
		if(start>=end)
			{
			// the mapping doesn't contain the pages...
			iMappings.Unlock();
			}
		else
			{
			RPageArray::TIter pages = aPages.Slice(start,end);
			if(mapping->IsPinned())
				{
				// pinned mappings veto page unmapping...
				if(aDecommitting)
					__e32_atomic_ior_ord8(&mapping->Flags(), (TUint8)DMemoryMapping::EPageUnmapVetoed);
				iMappings.Unlock();
				TRACE2(("DFineMemoryMapping[0x%08x] veto UnmapPages, index=0x%x count=0x%x",mapping,pages.Index(),pages.Count()));
				pages.VetoUnmap();
				}
			else
				{
				// unmap pages in the mapping...
				mapping->Open();
				TUint mapInstanceCount = mapping->MapInstanceCount();
				iMappings.Unlock();
				mapping->UnmapPages(pages,mapInstanceCount);
				mapping->AsyncClose();
				}
			}
		iMappings.Lock();
		mapping = iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	InvalidateTLB();
#endif
	}

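/**
Reduce the MMU access permissions on a set of pages, e.g. prior to paging
them out or moving them. Physically pinned mappings always veto the
restriction; other pinned mappings veto it too unless the restriction is only
a check for page moving (ERestrictPagesForMovingFlag), which they tolerate.
*/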
void DMemoryObject::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
	{
	TRACE2(("DMemoryObject[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));

	TUint offset = aPages.Index();
	TUint offsetEnd = aPages.IndexEnd();
	if(offset==offsetEnd)
		return;

	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		// get region where pages overlap the mapping...
		TUint start = mapping->iStartIndex;
		TUint end = start+mapping->iSizeInPages;
		if(start<offset)
			start = offset;
		if(end>offsetEnd)
			end = offsetEnd;
		if(start>=end)
			{
			// the mapping doesn't contain the pages...
			iMappings.Unlock();
			}
		else
			{
			RPageArray::TIter pages = aPages.Slice(start,end);
			if(mapping->IsPhysicalPinning() ||
				(!(aRestriction & ERestrictPagesForMovingFlag) && mapping->IsPinned()))
				{
				// Pinned mappings veto page restrictions, except for page moving,
				// where only physically pinned mappings block the move.
				iMappings.Unlock();
				TRACE2(("DFineMemoryMapping[0x%08x] veto RestrictPages, index=0x%x count=0x%x",mapping,pages.Index(),pages.Count()));
				pages.VetoRestrict(aRestriction & ERestrictPagesForMovingFlag);
				// Mappings lock required for iter.Finish() as iter will be removed from the mappings list.
				iMappings.Lock();
				break;
				}
			else
				{
				// pages not pinned, so check whether they need restricting...
				if(aRestriction == ERestrictPagesForMovingFlag)
					{
					// nothing to do when just checking for pinned mappings for
					// page moving purposes and not restricting to NA.
					iMappings.Unlock();
					}
				else
					{
					// restrict pages in the mapping...
					mapping->Open();
					TUint mapInstanceCount = mapping->MapInstanceCount();
					iMappings.Unlock();
					mapping->RestrictPagesNA(pages, mapInstanceCount);
					mapping->AsyncClose();
					}
				}
			}
		iMappings.Lock();
		mapping = iter.Next();
		}

	if(aRestriction & ERestrictPagesForMovingFlag)
		{// Clear the mappings added flag so page moving can detect whether any
		// new mappings have been added.
		ClearMappingAddedFlag();
		}

	iter.Finish();
	iMappings.Unlock();

	#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	// Writable memory objects will have been restricted to no access, so invalidate TLB.
	if (aRestriction != ERestrictPagesForMovingFlag)
		InvalidateTLB();
	#endif
	}


TInt DMemoryObject::CheckNewMapping(DMemoryMappingBase* aMapping)
	{
	if(iFlags&EDenyPinning && aMapping->IsPinned())
		return KErrAccessDenied;
	if(iFlags&EDenyMappings)
		return KErrAccessDenied;
	if(iFlags&EDenyWriteMappings && !aMapping->IsReadOnly())
		return KErrAccessDenied;
#ifdef MMU_SUPPORTS_EXECUTE_NEVER
	if((iFlags&EDenyExecuteMappings) && aMapping->IsExecutable())
		return KErrAccessDenied;
#endif
	return KErrNone;
	}


TInt DMemoryObject::AddMapping(DMemoryMappingBase* aMapping)
	{
	__NK_ASSERT_DEBUG(!aMapping->IsCoarse());

	// check mapping allowed...
	MmuLock::Lock();
	iMappings.Lock();

	TInt r = CheckNewMapping(aMapping);
	if(r == KErrNone)
		{
		Open();
		aMapping->LinkToMemory(this, iMappings);
		}

	iMappings.Unlock();
	MmuLock::Unlock();

	TRACE(("DMemoryObject[0x%08x]::AddMapping(0x%08x)  returns %d", this, aMapping, r));

	return r;
	}


void DMemoryObject::RemoveMapping(DMemoryMappingBase* aMapping)
	{
	aMapping->UnlinkFromMemory(iMappings);
	Close();
	}

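/**
Mark this memory object read-only. Fails with KErrInUse if any attached
mapping is writable; otherwise sets EDenyWriteMappings so that no writable
mapping can be added afterwards. The caller must hold the memory object lock.
*/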
TInt DMemoryObject::SetReadOnly()
	{
	TRACE(("DMemoryObject[0x%08x]::SetReadOnly()",this));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

	TInt r = KErrNone;
	iMappings.Lock();
	if (iFlags & EDenyWriteMappings)
		{// The object is already read only.
		iMappings.Unlock();
		return KErrNone;
		}

	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		if (!mapping->IsReadOnly())
			{
			r = KErrInUse;
			goto exit;
			}
		// This will flash iMappings.Lock to stop it being held too long.
		// This is safe as new mappings will be added to the end of the list so we
		// won't miss them.
		mapping = iter.Next();
		}
	// Block any writable mapping from being added to this memory object.
	// Use atomic operation as iMappings.Lock protects EDenyWriteMappings
	// but not the whole word.
	__e32_atomic_ior_ord8(&iFlags, (TUint8)EDenyWriteMappings);

exit:
	iter.Finish();
	iMappings.Unlock();
	return r;
	}


void DMemoryObject::DenyMappings()
	{
	TRACE(("DMemoryObject[0x%08x]::DenyMappings()",this));
	MmuLock::Lock();
	// Use atomic operation as MmuLock protects EDenyMappings
	// but not the whole word.
	__e32_atomic_ior_ord8(&iFlags, (TUint8)EDenyMappings);
	MmuLock::Unlock();
	}


TInt DMemoryObject::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
	{
	TRACE2(("DMemoryObject[0x%08x]::PhysAddr(0x%x,0x%x,?,?)",this,aIndex,aCount));
	TInt r = iPages.PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
	TRACE2(("DMemoryObject[0x%08x]::PhysAddr(0x%x,0x%x,?,?) returns %d aPhysicalAddress=0x%08x",this,aIndex,aCount,r,aPhysicalAddress));
	return r;
	}


void DMemoryObject::BTraceCreate()
	{
	BTraceContext8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectCreate,this,iSizeInPages);
	}


TUint DMemoryObject::PagingManagerData(TUint aIndex)
	{
	TRACE2(("DMemoryObject[0x%08x]::PagingManagerData(0x%x)",this,aIndex));
	__NK_ASSERT_DEBUG(IsDemandPaged());
	TUint value = iPages.PagingManagerData(aIndex);
	TRACE2(("DMemoryObject[0x%08x]::PagingManagerData(0x%x) returns 0x%x",this,aIndex,value));
	return value;
	}


void DMemoryObject::SetPagingManagerData(TUint aIndex, TUint aValue)
	{
	TRACE(("DMemoryObject[0x%08x]::SetPagingManagerData(0x%x,0x%08x)",this,aIndex,aValue));
	__NK_ASSERT_DEBUG(IsDemandPaged());
	iPages.SetPagingManagerData(aIndex, aValue);
	__NK_ASSERT_DEBUG(iPages.PagingManagerData(aIndex)==aValue);
	}




//
// DCoarseMemory::DPageTables
//

DCoarseMemory::DPageTables::DPageTables(DCoarseMemory* aMemory, TInt aNumPts, TUint aPteType)
	: iMemory(aMemory), iPteType(aPteType), iPermanenceCount(0), iNumPageTables(aNumPts)
	{
	aMemory->Open();
	iBlankPte = Mmu::BlankPte(aMemory->Attributes(),aPteType);
	}

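/**
Create the set of page tables used to map aMemory with PTE type aPteType.
The object is variable-length: Kern::AllocZ reserves one TPte* slot per
chunk-sized region of the memory object ((numPts-1) extra slots beyond the
one declared in the class), and placement new then constructs the object in
that buffer.
*/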
DCoarseMemory::DPageTables* DCoarseMemory::DPageTables::New(DCoarseMemory* aMemory, TUint aNumPages, TUint aPteType)
	{
	TRACE2(("DCoarseMemory::DPageTables::New(0x%08x,0x%x,0x%08x)",aMemory, aNumPages, aPteType));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
	__NK_ASSERT_DEBUG((aNumPages&(KChunkMask>>KPageShift))==0);
	TUint numPts = aNumPages>>(KChunkShift-KPageShift);
	DPageTables* self = (DPageTables*)Kern::AllocZ(sizeof(DPageTables)+(numPts-1)*sizeof(TPte*));
	if(self)
		{
		new (self) DPageTables(aMemory,numPts,aPteType);
		TInt r = self->Construct();
		if(r!=KErrNone)
			{
			self->Close();
			self = 0;
			}
		}
	TRACE2(("DCoarseMemory::DPageTables::New(0x%08x,0x%x,0x%08x) returns 0x%08x",aMemory, aNumPages, aPteType, self));
	return self;
	}


TInt DCoarseMemory::DPageTables::Construct()
	{
	if(iMemory->IsDemandPaged())
		{
		// do nothing, allow pages to be mapped on demand...
		return KErrNone;
		}

	RPageArray::TIter pageIter;
	iMemory->iPages.FindStart(0,iMemory->iSizeInPages,pageIter);

	// map pages...
	TInt r = KErrNone;
	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.Find(pageList);
		if(!n)
			break; // done

		// map some pages...
		r = MapPages(pageList);

		// done with pages...
		pageIter.FindRelease(n);

		if(r!=KErrNone)
			break;
		}

	iMemory->iPages.FindEnd(0,iMemory->iSizeInPages);

	return r;
	}


void DCoarseMemory::DPageTables::Close()
	{
	__NK_ASSERT_DEBUG(CheckCloseIsSafe());
	MmuLock::Lock();
	if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) != 1)
		{
		MmuLock::Unlock();
		return;
		}
	DCoarseMemory* memory = iMemory;
	if(memory)
		{
		iMemory->iPageTables[iPteType] = 0;
		iMemory = 0;
		}
	MmuLock::Unlock();
	if(memory)
		memory->Close();
	delete this;
	}


void DCoarseMemory::DPageTables::AsyncClose()
	{
	__NK_ASSERT_DEBUG(CheckAsyncCloseIsSafe());
	MmuLock::Lock();
	if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) != 1)
		{
		MmuLock::Unlock();
		return;
		}
	DCoarseMemory* memory = iMemory;
	if(memory)
		{
		iMemory->iPageTables[iPteType] = 0;
		iMemory = 0;
		}
	MmuLock::Unlock();
	if(memory)
		memory->AsyncClose();
	AsyncDelete();
	}


DCoarseMemory::DPageTables::~DPageTables()
	{
	TRACE2(("DCoarseMemory::DPageTables[0x%08x]::~DPageTables()",this));
	__NK_ASSERT_DEBUG(!iMemory);
	__NK_ASSERT_DEBUG(iMappings.IsEmpty());
	TUint i=0;
	while(i<iNumPageTables)
		{
		TPte* pt = iTables[i];
		if(pt)
			{
			iTables[i] = 0;
			::PageTables.Lock();
			::PageTables.Free(pt);
			::PageTables.Unlock();
			}
		++i;
		}
	}


TPte* DCoarseMemory::DPageTables::GetOrAllocatePageTable(TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	// get page table...
	TPte* pt = GetPageTable(aChunkIndex);
	if(!pt)
		pt = AllocatePageTable(aChunkIndex, iMemory->IsDemandPaged());

	return pt;
	}

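/**
As GetOrAllocatePageTable(TUint), but also pin the returned page table if it
is demand paged. The page table can be stolen whilst MmuLock is flashed to
allocate replacement pages, so the loop retries until the table obtained is
the one just pinned (or is not demand paged, e.g. the unpaged part of ROM,
in which case pinning is unnecessary).
*/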
TPte* DCoarseMemory::DPageTables::GetOrAllocatePageTable(TUint aChunkIndex, TPinArgs& aPinArgs)
	{
	__NK_ASSERT_DEBUG(aPinArgs.iPinnedPageTables);

	if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable))
		return 0;

	TPte* pinnedPt = 0;
	for(;;)
		{
		TPte* pt = GetOrAllocatePageTable(aChunkIndex);

		if(pinnedPt && pinnedPt!=pt)
			{
			// previously pinned page table not needed...
			PageTableAllocator::UnpinPageTable(pinnedPt,aPinArgs);

			// make sure we have memory for next pin attempt...
			MmuLock::Unlock();
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);
			MmuLock::Lock();
			if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable)) // if out of memory...
				{
				// make sure we free any unneeded page table we allocated...
				if(pt)
					FreePageTable(aChunkIndex);
				return 0;
				}
			}

		if(!pt)
			return 0; // out of memory

		if(pt==pinnedPt)
			{
			// we got a page table and it was pinned...
			*aPinArgs.iPinnedPageTables++ = pt;
			++aPinArgs.iNumPinnedPageTables;
			return pt;
			}

		// don't pin page table if it's not paged (e.g. unpaged part of ROM)...
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(!pti->IsDemandPaged())
			return pt;

		// pin the page table...
		pinnedPt = pt;
		PageTableAllocator::PinPageTable(pinnedPt,aPinArgs);
		}
	}

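/**
Allocate the page table for aChunkIndex. Entered and exited with MmuLock
held, but MmuLock cannot be held while waiting on the page table allocator
mutex, so the loop re-reads iTables[aChunkIndex] after re-acquiring MmuLock
in case the newly assigned table was stolen in the window between the two
locks.
*/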
TPte* DCoarseMemory::DPageTables::AllocatePageTable(TUint aChunkIndex, TBool aDemandPaged, TBool aPermanent)
	{
	TRACE2(("DCoarseMemory::DPageTables[0x%08x]::AllocatePageTable(0x%08x,%d,%d)",this,aChunkIndex,aDemandPaged,aPermanent));

	TPte* pt;
	do
		{
		// acquire page table lock...
		MmuLock::Unlock();
		::PageTables.Lock();

		// see if we still need to allocate a page table...
		pt = iTables[aChunkIndex];
		if(!pt)
			{
			// allocate page table...
			pt = ::PageTables.Alloc(aDemandPaged);
			if(!pt)
				{
				// out of memory...
				::PageTables.Unlock();
				MmuLock::Lock();
				return 0;
				}
			AssignPageTable(aChunkIndex,pt);
			}

		// release page table lock...
		::PageTables.Unlock();
		MmuLock::Lock();

		// check again...
		pt = iTables[aChunkIndex];
		}
	while(!pt);

	// we have a page table...
	if(aPermanent)
		{
		__NK_ASSERT_ALWAYS(!aDemandPaged);
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		pti->IncPermanenceCount();
		}
	return pt;
	}

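/**
Wire a newly allocated page table into this object: write a PDE for every
attached coarse mapping that covers aChunkIndex, then publish the table in
iTables[aChunkIndex]. MmuLock is flashed periodically whilst walking the
mapping list; this is safe because the table is published only after the
walk, under the same MmuLock hold as the final list check.
*/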
void DCoarseMemory::DPageTables::AssignPageTable(TUint aChunkIndex, TPte* aPageTable)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());

	MmuLock::Lock();

	// get physical address of page table now, this can't change whilst we have the page table allocator mutex...
	TPhysAddr ptPhys = Mmu::PageTablePhysAddr(aPageTable);

	// update mappings with new page table...
	TUint offset = aChunkIndex<<(KChunkShift-KPageShift);
	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
	TUint flash = 0;
	while(mapping)
		{
		TUint size = mapping->iSizeInPages;
		TUint start = offset-mapping->iStartIndex;
		if(start<size && !mapping->BeingDetached())
			{
			// page table is used by this mapping, so set PDE...
			TLinAddr linAddrAndOsAsid = mapping->LinAddrAndOsAsid()+start*KPageSize;
			TPde* pPde = Mmu::PageDirectoryEntry(linAddrAndOsAsid&KPageMask,linAddrAndOsAsid);
			TPde pde = ptPhys|mapping->BlankPde();
			TRACE2(("!PDE %x=%x",pPde,pde));
			__NK_ASSERT_DEBUG(((*pPde^pde)&~KPdeMatchMask)==0 || *pPde==KPdeUnallocatedEntry);
			*pPde = pde;
			SinglePdeUpdated(pPde);

			++flash; // increase flash rate because we've done quite a bit more work
			}
		iMappings.Unlock();
		MmuLock::Flash(flash,KMaxMappingsInOneGo);
		iMappings.Lock();
		mapping = (DMemoryMapping*)iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();

	// next, assign page table to us...
	// NOTE: This must happen before MmuLock is released after reaching the end of the mapping list,
	// otherwise it would be possible for a new mapping to be added and mapped before we manage
	// to update iTables with the page table it should use.
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
	pti->SetCoarse(iMemory,aChunkIndex,iPteType);
	__NK_ASSERT_DEBUG(!iTables[aChunkIndex]);
	iTables[aChunkIndex] = aPageTable; // new mappings can now see the page table

	MmuLock::Unlock();
	}

void DCoarseMemory::DPageTables::FreePageTable(TUint aChunkIndex)
	{
	TRACE2(("DCoarseMemory::DPageTables[0x%08x]::FreePageTable(0x%08x)",this,aChunkIndex));

	// acquire locks...
	::PageTables.Lock();
	MmuLock::Lock();

	// test if page table still needs freeing...
	TPte* pt = iTables[aChunkIndex];
	if(pt)
		{
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(pti->PageCount()==0 && pti->PermanenceCount()==0)
			{
			// page table needs freeing...
			UnassignPageTable(aChunkIndex);
			MmuLock::Unlock();
			::PageTables.Free(pt);
			::PageTables.Unlock();
			return;
			}
		}

	// page table doesn't need freeing...
	MmuLock::Unlock();
	::PageTables.Unlock();
	return;
	}


void DCoarseMemory::StealPageTable(TUint aChunkIndex, TUint aPteType)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(iPageTables[aPteType]);
	iPageTables[aPteType]->StealPageTable(aChunkIndex);
	}


void DCoarseMemory::DPageTables::StealPageTable(TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
#ifdef _DEBUG
	TPte* pt = iTables[aChunkIndex];
	__NK_ASSERT_DEBUG(pt);
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
	__NK_ASSERT_DEBUG(pti->PageCount()==0);
	__NK_ASSERT_DEBUG(pti->PermanenceCount()==0);
#endif
	UnassignPageTable(aChunkIndex);
	}

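/**
Reverse of AssignPageTable: clear iTables[aChunkIndex] first, so that any
concurrent commit or new mapping is forced to allocate a fresh page table
(blocking on the page tables lock we hold), then clear the corresponding PDE
in every attached coarse mapping, flashing MmuLock as we go.
*/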
void DCoarseMemory::DPageTables::UnassignPageTable(TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

#ifdef _DEBUG
	TPhysAddr ptPhys = Mmu::PageTablePhysAddr(iTables[aChunkIndex]);
#endif

	// zero page table pointer immediately so new mappings or memory commits will be forced to
	// create a new one (which will block until we've finished here because it also needs the
	// PageTablesLock)...
	iTables[aChunkIndex] = 0;

	// remove page table from mappings...
	TUint offset = aChunkIndex<<(KChunkShift-KPageShift);
	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
	TUint flash = 0;
	while(mapping)
		{
		__NK_ASSERT_DEBUG(iTables[aChunkIndex]==0); // can't have been recreated because we hold PageTablesLock
		TUint size = mapping->iSizeInPages;
		TUint start = offset-mapping->iStartIndex;
		if(start<size)
			{
			// page table is used by this mapping, so clear PDE...
			TLinAddr linAddrAndOsAsid = mapping->LinAddrAndOsAsid()+start*KPageSize;
			TPde* pPde = Mmu::PageDirectoryEntry(linAddrAndOsAsid&KPageMask,linAddrAndOsAsid);
			TPde pde = KPdeUnallocatedEntry;
			TRACE2(("!PDE %x=%x",pPde,pde));
			__NK_ASSERT_DEBUG(*pPde==pde || (*pPde&~KPageTableMask)==ptPhys);
			*pPde = pde;
			SinglePdeUpdated(pPde);

			++flash; // increase flash rate because we've done quite a bit more work
			}
		iMappings.Unlock();
		MmuLock::Flash(flash,KMaxMappingsInOneGo);
		iMappings.Lock();
		mapping = (DMemoryMapping*)iter.Next();
		}
	iter.Finish();

	iMappings.Unlock();
	}


TInt DCoarseMemory::DPageTables::AllocatePermanentPageTables()
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(iMemory));
	__NK_ASSERT_ALWAYS(!iMemory->IsDemandPaged());

	if(iPermanenceCount++)
		{
		// page tables already marked permanent, so end...
		return KErrNone;
		}

	// allocate all page tables...
	MmuLock::Lock();
	TUint flash = 0;
	TUint i;
	for(i=0; i<iNumPageTables; ++i)
		{
		TPte* pt = iTables[i];
		if(pt)
			{
			// already have page table...
			SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
			pti->IncPermanenceCount();
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
			}
		else
			{
			// allocate new page table...
			pt = AllocatePageTable(i,EFalse,ETrue);
			if(!pt)
				{
				MmuLock::Unlock();
				--iPermanenceCount;
				FreePermanentPageTables(0,i);
				return KErrNoMemory;
				}
			}
		}
	MmuLock::Unlock();

	return KErrNone;
	}


void DCoarseMemory::DPageTables::FreePermanentPageTables(TUint aChunkIndex, TUint aChunkCount)
	{
	MmuLock::Lock();

	TUint flash = 0;
	TUint i;
	for(i=aChunkIndex; i<aChunkIndex+aChunkCount; ++i)
		{
		TPte* pt = iTables[i];
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(pti->DecPermanenceCount() || pti->PageCount())
			{
			// still in use...
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
			}
		else
			{
			// page table no longer used for anything...
			MmuLock::Unlock();
			FreePageTable(i);
			MmuLock::Lock();
			}
		}

	MmuLock::Unlock();
	}


void DCoarseMemory::DPageTables::FreePermanentPageTables()
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(iMemory));

	if(--iPermanenceCount)
		{
		// page tables still permanent, so end...
		return;
		}

	FreePermanentPageTables(0,iNumPageTables);
	}


TInt DCoarseMemory::DPageTables::AddMapping(DCoarseMapping* aMapping)
	{
	TRACE(("DCoarseMemory::DPageTables[0x%08x]::AddMapping(0x%08x)",this,aMapping));
	__NK_ASSERT_DEBUG(aMapping->IsCoarse());
	Open();
	MmuLock::Lock();
	iMappings.Lock();
	aMapping->LinkToMemory(iMemory,iMappings);
	iMappings.Unlock();
	MmuLock::Unlock();
	return KErrNone;
	}


void DCoarseMemory::DPageTables::RemoveMapping(DCoarseMapping* aMapping)
	{
	aMapping->UnlinkFromMemory(iMappings);
	Close();
	}


void DCoarseMemory::DPageTables::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TUint pteIndex = aIndex & (KChunkMask>>KPageShift);

	// get address of page table...
	MmuLock::Lock();
	TUint i = aIndex>>(KChunkShift-KPageShift);
	TPte* pPte = GetPageTable(i);

	if (!pPte)
		{// This page has been unmapped so just return.
		MmuLock::Unlock();
		return;
		}

	// remap the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArray, iBlankPte);

	MmuLock::Unlock();

	if (aInvalidateTLB)
		FlushTLB(aIndex, aIndex + 1);
	}

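/**
Map pages into the page tables, allocating tables on demand. Page indices
decompose as index>>(KChunkShift-KPageShift) to select the page table and
index&(KChunkMask>>KPageShift) to select the PTE within it; e.g. with 4KB
pages and 1MB chunks that is 256 PTEs per table. Work is batched in runs of
at most KMaxPagesInOneGo so locks are not held for too long.
*/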
TInt DCoarseMemory::DPageTables::MapPages(RPageArray::TIter aPages)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of page table...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte = GetOrAllocatePageTable(i);

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			return KErrNoMemory;
			}

		// map some pages...
		pPte += pteIndex;
		TBool keepPt = Mmu::MapPages(pPte, n, pages, iBlankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(i);

		// move on...
		aPages.Skip(n);
		}

	return KErrNone;
	}


void DCoarseMemory::DPageTables::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	TUint startIndex = aPages.Index();

	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of PTE for pages...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte = iTables[i];
		if(pPte)
			{
			// unmap some pages...
			pPte += pteIndex;
			TBool keepPt = Mmu::UnmapPages(pPte,n,pages);
			MmuLock::Unlock();

			// free page table if no longer needed...
			if(!keepPt)
				FreePageTable(i);
			}
		else
			{
			// no page table found...
			MmuLock::Unlock();
			}

		// move on...
		aPages.Skip(n);
		}

	FlushTLB(startIndex,aPages.IndexEnd());
	}


void DCoarseMemory::DPageTables::RestrictPagesNA(RPageArray::TIter aPages)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	TUint startIndex = aPages.Index();

	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of PTE for pages...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte = iTables[i];
		if(pPte)
			{
			// restrict some pages...
			pPte += pteIndex;
			Mmu::RestrictPagesNA(pPte,n,pages);
			}
		MmuLock::Unlock();

		// move on...
		aPages.Skip(n);
		}

	FlushTLB(startIndex,aPages.IndexEnd());
	}


TInt DCoarseMemory::DPageTables::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs,
										DMemoryMappingBase* aMapping, TUint aMapInstanceCount)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	TBool pinPageTable = aPinArgs.iPinnedPageTables!=0; // check if we need to pin the first page table
	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);
		if(pteIndex==0)
			pinPageTable = aPinArgs.iPinnedPageTables!=0;	// started a new page table, check if we need to pin it

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// make sure we have memory to pin the page table if required...
		if(pinPageTable)
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);

		// get address of page table...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte;
		if(pinPageTable)
			pPte = GetOrAllocatePageTable(i,aPinArgs);
		else
			pPte = GetOrAllocatePageTable(i);

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			return KErrNoMemory;
			}

		if (aMapInstanceCount != aMapping->MapInstanceCount())
			{// The mapping that took the page fault has been reused.
			MmuLock::Unlock();
			FreePageTable(i);	// This will only free if this is the only pt referencer.
			return KErrNotFound;
			}

		// map some pages...
		pPte += pteIndex;
		TPte blankPte = iBlankPte;
		if(aPinArgs.iReadOnly)
			blankPte = Mmu::MakePteInaccessible(blankPte,true);
		TBool keepPt = Mmu::PageInPages(pPte, n, pages, blankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(i);

		// move on...
		aPages.Skip(n);
		pinPageTable = false;
		}

	return KErrNone;
	}


TBool DCoarseMemory::DPageTables::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	TUint pteIndex = aIndex & (KChunkMask >> KPageShift);

	// get address of page table...
	TUint i = aIndex >> (KChunkShift - KPageShift);
	TPte* pPte = GetPageTable(i);

	// Check the page is still mapped...
	if (!pPte)
		return EFalse;

	// map the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArrayPtr, iBlankPte);
	return ETrue;
	}

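/**
Invalidate the TLB for pages [aStartIndex,aEndIndex) in every attached
mapping. Only built when fine-grained TLB maintenance is in use; with
COARSE_GRAINED_TLB_MAINTENANCE the callers perform coarser invalidation
instead (e.g. per-ASID), and this function compiles to nothing.
*/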
void DCoarseMemory::DPageTables::FlushTLB(TUint aStartIndex, TUint aEndIndex)
	{
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
	while(mapping)
		{
		// get region which overlaps the mapping...
		TUint start = mapping->iStartIndex;
		TUint end = start+mapping->iSizeInPages;
		if(start<aStartIndex)
			start = aStartIndex;
		if(end>aEndIndex)
			end = aEndIndex;
		if(start>=end)
			{
			// the mapping doesn't contain the pages...
			iMappings.Unlock();
			}
		else
			{
			// flush TLB for pages in the mapping...
			TUint size = end-start;
			start -= mapping->iStartIndex;
			TLinAddr addr = mapping->LinAddrAndOsAsid()+start*KPageSize;
			TLinAddr endAddr = addr+size*KPageSize;
			iMappings.Unlock();
			do
				{
				InvalidateTLBForPage(addr);
				}
			while((addr+=KPageSize)<endAddr);
			}
		iMappings.Lock();
		mapping = (DMemoryMapping*)iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();
#endif
	}



//
// DCoarseMemory
//

DCoarseMemory::DCoarseMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: DMemoryObject(aManager,ECoarseObject,aSizeInPages,aAttributes,aCreateFlags)
	{
	}


DCoarseMemory* DCoarseMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	DCoarseMemory* self = new DCoarseMemory(aManager, aSizeInPages, aAttributes, aCreateFlags);
	if(self)
		{
		if(self->Construct()==KErrNone)
			return self;
		self->Close();
		}
	return 0;
	}


DCoarseMemory::~DCoarseMemory()
	{
	TRACE2(("DCoarseMemory[0x%08x]::~DCoarseMemory()",this));
#ifdef _DEBUG
	for(TUint i=0; i<ENumPteTypes; i++)
		{
		__NK_ASSERT_DEBUG(!iPageTables[i]);
		}
#endif
	}

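/**
Choose the mapping type for a region of this memory object. A DCoarseMapping
(which shares this object's page tables) can only be used when both the start
index and the count are chunk aligned, i.e. when
((aIndex|aCount)&(KChunkMask>>KPageShift))==0; anything unaligned falls back
to an ordinary DFineMapping with its own page tables.
*/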
DMemoryMapping* DCoarseMemory::CreateMapping(TUint aIndex, TUint aCount)
	{
	if (((aIndex|aCount)&(KChunkMask>>KPageShift))==0)
		return new DCoarseMapping();
	else
		return new DFineMapping();
	}


TInt DCoarseMemory::MapPages(RPageArray::TIter aPages)
	{
	TRACE2(("DCoarseMemory[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

	// map pages in all page tables for coarse mapping...
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			tables->Open();
			MmuLock::Unlock();
			TInt r = tables->MapPages(aPages);
			tables->AsyncClose();
			if(r!=KErrNone)
				return r;
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// map pages in all fine mappings...
	return DMemoryObject::MapPages(aPages);
	}

  1373 
       
  1374 void DCoarseMemory::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
       
  1375 	{
       
  1376 	TRACE2(("DCoarseMemory[0x%08x]::RemapPage() index=0x%x",this, aIndex));
       
  1377 
       
  1378 	// remap pages in all page tables for coarse mapping...
       
  1379 	MmuLock::Lock();
       
  1380 	TUint pteType = 0;
       
  1381 	do
       
  1382 		{
       
  1383 		DPageTables* tables = iPageTables[pteType];
       
  1384 		if(tables)
       
  1385 			{
       
  1386 			tables->Open();
       
  1387 			MmuLock::Unlock();
       
  1388 			tables->RemapPage(aPageArray, aIndex, aInvalidateTLB);
       
  1389 			tables->AsyncClose();
       
  1390 			MmuLock::Lock();
       
  1391 			}
       
  1392 		}
       
  1393 	while(++pteType<ENumPteTypes);
       
  1394 	MmuLock::Unlock();
       
  1395 
       
  1396 	// remap page in all fine mappings...
       
  1397 	DMemoryObject::RemapPage(aPageArray, aIndex, aInvalidateTLB);
       
  1398 	}
       
  1399 
       
  1400 
       
  1401 void DCoarseMemory::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
       
  1402 	{
       
  1403 	TRACE2(("DCoarseMemory[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));
       
  1404 
       
  1405 	if(!aPages.Count())
       
  1406 		return;
       
  1407 
       
  1408 	// unmap pages from all page tables for coarse mapping...
       
  1409 	MmuLock::Lock();
       
  1410 	TUint pteType = 0;
       
  1411 	do
       
  1412 		{
       
  1413 		DPageTables* tables = iPageTables[pteType];
       
  1414 		if(tables)
       
  1415 			{
       
  1416 			tables->Open();
       
  1417 			MmuLock::Unlock();
       
  1418 			tables->UnmapPages(aPages,aDecommitting);
       
  1419 			tables->AsyncClose();
       
  1420 			MmuLock::Lock();
       
  1421 			}
       
  1422 		}
       
  1423 	while(++pteType<ENumPteTypes);
       
  1424 	MmuLock::Unlock();
       
  1425 
       
  1426 	// unmap pages from all fine mappings...
       
  1427 	DMemoryObject::UnmapPages(aPages,aDecommitting);
       
  1428 	}
       
  1429 
       
  1430 
       
  1431 void DCoarseMemory::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
       
  1432 	{
       
  1433 	TRACE2(("DCoarseMemory[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));
       
  1434 	__ASSERT_COMPILE(ERestrictPagesForMovingFlag != ERestrictPagesNoAccessForMoving);
       

	if(!aPages.Count())
		return;

	if (aRestriction != ERestrictPagesForMovingFlag)
		{// restrict pages in all the page tables for the coarse mapping...
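		// (Only genuine access restrictions touch the coarse page tables; the
		// moving-flag-only case is left to the generic fine-mapping walk below.)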
		MmuLock::Lock();
		TUint pteType = 0;
		do
			{
			DPageTables* tables = iPageTables[pteType];
			if(tables)
				{
				tables->Open();
				MmuLock::Unlock();
				tables->RestrictPagesNA(aPages);
				tables->AsyncClose();
				MmuLock::Lock();
				}
			}
		while(++pteType<ENumPteTypes);
		MmuLock::Unlock();
		}

	// restrict pages in all fine mappings; this will also check for pinned mappings...
	DMemoryObject::RestrictPages(aPages,aRestriction);
	}


TPte* DCoarseMemory::GetPageTable(TUint aPteType, TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(aChunkIndex < (iSizeInPages >> KPagesInPDEShift));
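	// one page table maps one chunk's worth of pages, so aChunkIndex selects
	// which of this object's page tables to return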
	return iPageTables[aPteType]->GetPageTable(aChunkIndex);
	}


TInt DCoarseMemory::PageIn(DCoarseMapping* aMapping, RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	DPageTables* tables = iPageTables[aMapping->PteType()];
	tables->Open();

	MmuLock::Unlock();

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = aMapping->Base()+(aPages.Index()-aMapping->iStartIndex)*KPageSize;
	TLinAddr endAddr = startAddr+aPages.Count()*KPageSize;
#endif
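	// (With COARSE_GRAINED_TLB_MAINTENANCE the whole address space is flushed from
	// the TLB once, below; otherwise each page in [startAddr,endAddr) is
	// invalidated individually.)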

	TInt r = tables->PageIn(aPages, aPinArgs, aMapping, aMapInstanceCount);

	// clean TLB...
#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	InvalidateTLBForAsid(aMapping->OsAsid());
#else
	TLinAddr addr = startAddr+aMapping->OsAsid();
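	// (the OS ASID travels in the low bits of the page-aligned address; e.g. with
	// 4K pages, page 0x40000000 in address space 5 would be passed as 0x40000005)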
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif

	tables->AsyncClose();

	return r;
	}


TBool DCoarseMemory::MovingPageIn(DCoarseMapping* aMapping, TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	DCoarseMemory::DPageTables* tables = iPageTables[aMapping->PteType()];
	return tables->MovingPageIn(aPageArrayPtr, aIndex);
	}


TPte* DCoarseMemory::FindPageTable(DCoarseMapping* aMapping, TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	DCoarseMemory::DPageTables* tables = iPageTables[aMapping->PteType()];

	// get address of page table...
	TUint i = aMemoryIndex >> (KChunkShift - KPageShift);
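	// (i.e. divide the page index by the number of pages per chunk; each page
	// table maps exactly one chunk of the memory object)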
	return tables->GetPageTable(i);
	}


TInt DCoarseMemory::ClaimInitialPages(TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages)
	{
	TRACE(("DCoarseMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));

	// validate arguments...
	if(aBase&KChunkMask || aBase<KGlobalMemoryBase)
		return KErrArgument;
	if(aSize&KPageMask || aSize>iSizeInPages*KPageSize)
		return KErrArgument;

	// get DPageTables object...
	TUint pteType = Mmu::PteType(aPermissions,true);
	MemoryObjectLock::Lock(this);
	DPageTables* tables = GetOrAllocatePageTables(pteType);
	MemoryObjectLock::Unlock(this);
	__NK_ASSERT_DEBUG(tables);

	// check and allocate page array entries...
	RPageArray::TIter pageIter;
	TInt r = iPages.AddStart(0,aSize>>KPageShift,pageIter);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	// hold MmuLock for a long time; this shouldn't matter as it is only done during boot
	::PageTables.Lock();
	MmuLock::Lock();

	TPte blankPte = tables->iBlankPte;
	TPte** pPt = tables->iTables;
	TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);
	TUint offset = 0;
	TUint size = aSize;
	while(size)
		{
		TPde pde = *pPde;
		TRACE(("DCoarseMemory::ClaimInitialPages: %08x: 0x%08x",aBase+offset,pde));

		TPte* pPte = NULL;
		SPageTableInfo* pti = NULL;

		if (Mmu::PdeMapsSection(pde))
			{
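			// (a section PDE maps the whole chunk directly, e.g. a 1MB ARM
			// section, so there is no page table to walk yet)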
			TPhysAddr sectionBase = Mmu::SectionBaseFromPde(pde);
			TRACE(("  chunk is section mapped, base at %08x", sectionBase));
			__NK_ASSERT_DEBUG(sectionBase != KPhysAddrInvalid);

			TPde newPde = sectionBase | Mmu::BlankSectionPde(Attributes(),pteType);
			__NK_ASSERT_DEBUG(((*pPde^newPde)&~KPdeMatchMask)==0);
			*pPde = newPde;
			SinglePdeUpdated(pPde);
			InvalidateTLB();

			// We allocate and populate a page table for the section even though it won't be mapped
			// initially, because the presence of the page table is used to check whether RAM is
			// mapped in a chunk, and because it makes it possible to break the section mapping
			// without allocating memory.  This may change in the future.

			// Note these page tables are always unpaged here regardless of the paged bit in iFlags
			// (e.g. a ROM object is marked as paged despite its initial pages being unpaged)
			pPte = tables->AllocatePageTable(offset >> KChunkShift, EFalse, EFalse);
			if (!pPte)
				{
				MmuLock::Unlock();
				::PageTables.Unlock(); // don't leak the page table allocator lock taken above
				return KErrNoMemory;
				}
			pti = SPageTableInfo::FromPtPtr(pPte);
			}
		else if (Mmu::PdeMapsPageTable(pde))
			{
			pPte = Mmu::PageTableFromPde(*pPde);
			TRACE(("  page table found at %08x", pPte));
			__NK_ASSERT_DEBUG(pPte);
			pti = SPageTableInfo::FromPtPtr(pPte);
			pti->SetCoarse(this,offset>>KChunkShift,pteType);
			}

		*pPt++ = pPte;
		++pPde;

		TUint numPages = 0;
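		// walk each page in this chunk, claiming whatever the bootstrap already
		// mapped at the corresponding linear address...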
		do
			{
			TPhysAddr pagePhys = Mmu::LinearToPhysical(aBase+offset);
			TPte pte;
			if(pagePhys==KPhysAddrInvalid)
				{
				if(size)
					{
					__NK_ASSERT_ALWAYS(aAllowGaps); // we have a gap, check this is allowed
					pageIter.Skip(1);
					}

				pte = KPteUnallocatedEntry;
				}
			else
				{
				__NK_ASSERT_ALWAYS(size); // pages can't be mapped above aSize

				pageIter.Add(1,&pagePhys);

				SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
				__NK_ASSERT_ALWAYS(pi || aAllowNonRamPages);
				if(pi)
					{
					__NK_ASSERT_ALWAYS(pi->Type()==SPageInfo::EFixed);
					pi->SetManaged(this,offset>>KPageShift,PageInfoFlags());
					}

				++numPages;
				pte = pagePhys|blankPte;
				}

			if(pPte)
				{
				TRACE2(("!PTE %x=%x (was %x)",pPte,pte,*pPte));
				__NK_ASSERT_DEBUG(((*pPte^pte)&~KPteMatchMask)==0 || *pPte==KPteUnallocatedEntry);
				*pPte = pte;
				CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
				++pPte;
				}

			offset += KPageSize;
			if(size)
				size -= KPageSize;
			}
		while(offset&(KChunkMask&~KPageMask));

		if(pti)
			{
			pti->IncPageCount(numPages);
			TRACE2(("pt %x page count=%d",TLinAddr(pPte)-KPageTableSize,numPages));
			__NK_ASSERT_DEBUG(pti->CheckPageCount());
			}
		}

	InvalidateTLBForAsid(KKernelOsAsid);

	MmuLock::Unlock();
	::PageTables.Unlock();

	// release page array entries...
	iPages.AddEnd(0,aSize>>KPageShift);

	return KErrNone;
	}


DCoarseMemory::DPageTables* DCoarseMemory::GetOrAllocatePageTables(TUint aPteType)
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));
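	// (the memory object lock serialises this function, so after the unlocked
	// check below no one else can install a DPageTables for this pteType first)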

	MmuLock::Lock();
	DPageTables* tables = iPageTables[aPteType];
	if(tables)
		tables->Open();
	MmuLock::Unlock();

	if(!tables)
		{
		// allocate a new one if required...
		tables = DPageTables::New(this, iSizeInPages, aPteType);
		if (tables)
			{
			__NK_ASSERT_DEBUG(!iPageTables[aPteType]);
			iPageTables[aPteType] = tables;
			}
		}

	return tables;
	}


TInt DCoarseMemory::AddMapping(DMemoryMappingBase* aMapping)
	{
	if(!aMapping->IsCoarse())
		{
		// not a coarse mapping...
		return DMemoryObject::AddMapping(aMapping);
		}

	__NK_ASSERT_DEBUG(aMapping->IsPinned()==false); // coarse mappings can't pin

	// Check that the mapping is allowed.  We must hold the memory object lock to
	// prevent changes to the object's restrictions.
	MemoryObjectLock::Lock(this);
	TInt r = CheckNewMapping(aMapping);
	if(r!=KErrNone)
		{
		MemoryObjectLock::Unlock(this);
		return r;
		}

	// get the DPageTables for the mapping...
	DPageTables* tables = GetOrAllocatePageTables(aMapping->PteType());

	// Safe to release here, as no restrictions on this type of mapping can be added
	// now that we have an iPageTables entry for it.
	MemoryObjectLock::Unlock(this);
	if(!tables)
		return KErrNoMemory;

	// add the mapping to the DPageTables...
	r = tables->AddMapping((DCoarseMapping*)aMapping);
	if(r==KErrNone)
		{
		// allocate permanent page tables if required...
		if(aMapping->Flags()&DMemoryMapping::EPermanentPageTables)
			{
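			// (permanent page tables stay allocated for the lifetime of the
			// mapping, so later operations can rely on them being present)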
			MemoryObjectLock::Lock(this);
			r = tables->AllocatePermanentPageTables();
			MemoryObjectLock::Unlock(this);

			if(r==KErrNone)
				__e32_atomic_ior_ord8(&aMapping->Flags(), (TUint8)DMemoryMapping::EPageTablesAllocated);
			else
				tables->RemoveMapping((DCoarseMapping*)aMapping);
			}
		}

	tables->Close();

	return r;
	}


void DCoarseMemory::RemoveMapping(DMemoryMappingBase* aMapping)
	{
	if(!aMapping->IsCoarse())
		{
		// not a coarse mapping...
		DMemoryObject::RemoveMapping(aMapping);
		return;
		}

	// need a temporary reference on self because we may be removing the last mapping,
	// which would delete this object...
	Open();

	// get the DPageTables the mapping is attached to...
	DPageTables* tables = iPageTables[aMapping->PteType()];
	__NK_ASSERT_DEBUG(tables); // must exist because aMapping has a reference on it

	// free permanent page tables if required...
	if(aMapping->Flags()&DMemoryMapping::EPageTablesAllocated)
		{
		MemoryObjectLock::Lock(this);
		tables->FreePermanentPageTables();
		MemoryObjectLock::Unlock(this);
		}

	// remove the mapping from the page tables object...
	tables->RemoveMapping((DCoarseMapping*)aMapping);

	Close(); // may delete this memory object
	}


TInt DCoarseMemory::SetReadOnly()
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

	// Search for writable iPageTables entries.
	// We hold the MemoryObjectLock, so iPageTables entries can't be added or removed.
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		if((pteType & EPteTypeWritable) && iPageTables[pteType])
			{
			MmuLock::Unlock();
			return KErrInUse;
			}
		}
	while(++pteType < ENumPteTypes);
	MmuLock::Unlock();

	// now make all fine mappings read-only too...
	return DMemoryObject::SetReadOnly();
	}


//
// DFineMemory
//

DFineMemory::DFineMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: DMemoryObject(aManager,0,aSizeInPages,aAttributes,aCreateFlags)
	{
	}


DFineMemory* DFineMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	DFineMemory* self = new DFineMemory(aManager,aSizeInPages,aAttributes,aCreateFlags);
	if(self)
		{
		if(self->Construct()==KErrNone)
			return self;
		self->Close();
		}
	return 0;
	}


DFineMemory::~DFineMemory()
	{
	TRACE2(("DFineMemory[0x%08x]::~DFineMemory",this));
	}


TInt DFineMemory::ClaimInitialPages(TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages)
	{
	TRACE(("DFineMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));
	(void)aPermissions;
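	// (aPermissions is only read in debug builds, for the blankPte check below;
	// the (void) cast keeps release builds free of unused-parameter warnings)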

	// validate arguments...
	if(aBase&KPageMask || aBase<KGlobalMemoryBase)
		return KErrArgument;
	if(aSize&KPageMask || aSize>iSizeInPages*KPageSize)
		return KErrArgument;

#ifdef _DEBUG
	// calculate 'blankPte', the correct PTE value for pages in this memory object...
	TUint pteType = Mmu::PteType(aPermissions,true);
	TPte blankPte = Mmu::BlankPte(Attributes(),pteType);
#endif

	// get page table...
	TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);
	TPte* pPte = Mmu::PageTableFromPde(*pPde);
	if(!pPte)
		return KErrNone; // no pages mapped

	// check and allocate page array entries...
	RPageArray::TIter pageIter;
	TInt r = iPages.AddStart(0,aSize>>KPageShift,pageIter);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	// hold MmuLock for a long time; this shouldn't matter as it is only done during boot
	MmuLock::Lock();

	// set up the page table for fine mappings...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
	__NK_ASSERT_DEBUG(pti->CheckPageCount());
	TBool pageTableOk = pti->ClaimFine(aBase&~KChunkMask,KKernelOsAsid);
	__NK_ASSERT_ALWAYS(pageTableOk);
	TRACE(("DFineMemory::ClaimInitialPages page table = 0x%08x",pPte));

	TUint pteIndex = (aBase>>KPageShift)&(KChunkMask>>KPageShift);
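	// (index of aBase's page within its page table; e.g. with 4K pages and 1MB
	// chunks, aBase==0x80103000 gives pteIndex==3)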
	TUint pageIndex = 0;
	TUint size = aSize;
	while(pageIndex<iSizeInPages)
		{
		TPhysAddr pagePhys = Mmu::PtePhysAddr(pPte[pteIndex],pteIndex);
		if(pagePhys==KPhysAddrInvalid)
			{
			if(size)
				{
				__NK_ASSERT_ALWAYS(aAllowGaps); // we have a gap, check this is allowed
				pageIter.Skip(1);
				}

			// check PTE is correct...
			__NK_ASSERT_DEBUG(pPte[pteIndex]==KPteUnallocatedEntry);
			}
		else
			{
			__NK_ASSERT_ALWAYS(size); // pages can't be mapped above aSize

			pageIter.Add(1,&pagePhys);

			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);

			if(!pi)
				__NK_ASSERT_ALWAYS(aAllowNonRamPages);
			else
				{
				__NK_ASSERT_ALWAYS(pi->Type()==SPageInfo::EFixed);
				pi->SetManaged(this,pageIndex,PageInfoFlags());
				}

#ifdef _DEBUG
			// check PTE is correct...
			TPte pte = pagePhys|blankPte;
			__NK_ASSERT_DEBUG(((pPte[pteIndex]^pte)&~KPteMatchMask)==0);
#endif
			}

		// move on to the next page...
		++pteIndex;
		__NK_ASSERT_ALWAYS(pteIndex<(KChunkSize>>KPageShift));
		++pageIndex;
		if(size)
			size -= KPageSize;
		}

	MmuLock::Unlock();

	// release page array entries...
	iPages.AddEnd(0,aSize>>KPageShift);

	return KErrNone;
	}