kernel/eka/memmodel/epoc/flexible/mmu/mmapping.cpp
       
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "mm.h"
#include "mmu.h"
#include "mmapping.h"
#include "mobject.h"
#include "maddressspace.h"
#include "mptalloc.h"
#include "mmanager.h" // needed for DMemoryManager::Pin/Unpin, not nice, but no obvious way to break dependency
#include "cache_maintenance.inl"

//
// DMemoryMapping
//
DMemoryMapping::DMemoryMapping(TUint aType)
	: DMemoryMappingBase(aType)
	{
	}


TInt DMemoryMapping::Construct(TMemoryAttributes aAttributes, TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset)
	{
	TRACE(("DMemoryMapping[0x%08x]::Construct(0x%x,0x%x,%d,0x%08x,0x%08x,0x%08x)",this,(TUint32&)aAttributes,aFlags,aOsAsid,aAddr,aSize,aColourOffset));

	// setup PDE values...
	iBlankPde = Mmu::BlankPde(aAttributes);

	// setup flags...
	if(aFlags&EMappingCreateReserveAllResources)
		Flags() |= EPermanentPageTables;

	// allocate virtual memory...
	TInt r = AllocateVirtualMemory(aFlags,aOsAsid,aAddr,aSize,aColourOffset);
	if(r==KErrNone)
		{
		// add to address space...
		TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
		TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
		r = AddressSpace[osAsid]->AddMapping(addr,this);
		if(r!=KErrNone)
			FreeVirtualMemory();
		}

	return r;
	}
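
// Note on encoding (illustrative): iAllocatedLinAddrAndOsAsid packs two values
// into one word - the page-aligned base address in the bits above KPageMask and
// the OS address space id (ASID) in the bits below it. The unpacking idiom used
// throughout this file is:
//
//		TLinAddr addr   = iAllocatedLinAddrAndOsAsid & ~KPageMask;	// base address
//		TInt     osAsid = iAllocatedLinAddrAndOsAsid &  KPageMask;	// address space id
//
// iLinAddrAndOsAsid uses the same packing but with the page-colour offset added
// to the address part (see Map below); the two may differ only in the colour bits.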
       


DMemoryMapping::~DMemoryMapping()
	{
	TRACE(("DMemoryMapping[0x%08x]::~DMemoryMapping()",this));
	__NK_ASSERT_DEBUG(!IsAttached());

	// remove from address space...
	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
	TAny* removed = AddressSpace[osAsid]->RemoveMapping(addr);
	if(removed)
		__NK_ASSERT_DEBUG(removed==this);

	FreeVirtualMemory();
	}


void DMemoryMapping::BTraceCreate()
	{
	MmuLock::Lock();
	TUint32 data[4] = { iStartIndex, iSizeInPages, OsAsid(), Base() };
	BTraceContextN(BTrace::EFlexibleMemModel,BTrace::EMemoryMappingCreate,this,Memory(),data,sizeof(data));
	MmuLock::Unlock();
	}


TInt DMemoryMapping::Map(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions)
	{
	TRACE(("DMemoryMapping[0x%08x]::Map(0x%08x,0x%x,0x%x,0x%08x)",this,aMemory,aIndex,aCount,aPermissions));
	__NK_ASSERT_DEBUG(!IsAttached());

	// check reserved resources are compatible (memory objects with reserved resources
	// don't expect to have to allocate memory when mapping new pages)...
	if(aMemory->iFlags&DMemoryObject::EReserveResources && !(Flags()&EPermanentPageTables))
		return KErrArgument;

	// check arguments for coarse mappings...
	if(IsCoarse())
		{
		if(!aMemory->IsCoarse())
			return KErrArgument;
		if((aCount|aIndex)&(KChunkMask>>KPageShift))
			return KErrArgument;
		}

	TLinAddr base = iAllocatedLinAddrAndOsAsid & ~KPageMask;
	TLinAddr top = base + (aCount << KPageShift);

	// check user/supervisor memory partitioning...
	if (aPermissions & EUser)
		{
		if (base > KUserMemoryLimit || top > KUserMemoryLimit)
			return KErrAccessDenied;
		}
	else
		{
		if (base < KUserMemoryLimit || top < KUserMemoryLimit)
			return KErrAccessDenied;
		}

	// check that mapping doesn't straddle KUserMemoryLimit or KGlobalMemoryBase...
	__NK_ASSERT_DEBUG((base < KUserMemoryLimit) == (top <= KUserMemoryLimit));
	__NK_ASSERT_DEBUG((base < KGlobalMemoryBase) == (top <= KGlobalMemoryBase));

	// check that only global memory is mapped into the kernel process
	TBool global = base >= KGlobalMemoryBase;
	__NK_ASSERT_DEBUG(global || (iAllocatedLinAddrAndOsAsid & KPageMask) != KKernelOsAsid);

	// setup attributes...
	PteType() = Mmu::PteType(aPermissions,global);
	iBlankPte = Mmu::BlankPte(aMemory->Attributes(),PteType());

	// setup base address...
	TUint colourOffset = ((aIndex&KPageColourMask)<<KPageShift);
	if(colourOffset+aCount*KPageSize > iAllocatedSize)
		return KErrTooBig;
	__NK_ASSERT_DEBUG(!iLinAddrAndOsAsid || ((iLinAddrAndOsAsid^iAllocatedLinAddrAndOsAsid)&~(KPageColourMask<<KPageShift))==0); // new, OR, only differ in page colour
	iLinAddrAndOsAsid = iAllocatedLinAddrAndOsAsid+colourOffset;

	// attach to memory object...
	TInt r = Attach(aMemory,aIndex,aCount);

	// cleanup if error...
	if(r!=KErrNone)
		iLinAddrAndOsAsid = 0;

	return r;
	}
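
// Note on page colour (illustrative): the colour offset above keeps virtual
// addresses cache-colour aligned; a page at index aIndex is mapped so that the
// low colour bits of (address>>KPageShift) equal (aIndex&KPageColourMask), i.e.
//
//		Base() == allocated base + ((aIndex & KPageColourMask) << KPageShift)
//
// AllocateVirtualMemory below over-allocates by the colour offset so this
// adjustment always fits; on VIPT caches this placement avoids illegal cache
// aliases (an assumption about the motivation, not stated in this file).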
       


void DMemoryMapping::Unmap()
	{
	Detach();
	// we can't clear iLinAddrAndOsAsid here because this may be needed by other code,
	// e.g. DFineMapping::MapPages/UnmapPages/RestrictPages/PageIn
	}


TInt DMemoryMapping::AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset)
	{
	TRACE(("DMemoryMapping[0x%08x]::AllocateVirtualMemory(0x%x,%d,0x%08x,0x%08x,0x%08x)",this,aFlags,aOsAsid,aAddr,aSize,aColourOffset));
	__NK_ASSERT_DEBUG((aAddr&KPageMask)==0);
	__NK_ASSERT_DEBUG(!iAllocatedLinAddrAndOsAsid);
	__NK_ASSERT_DEBUG(!iAllocatedSize);

	// setup PDE type...
	TUint pdeType = 0;
	if(aFlags&EMappingCreateCommonVirtual)
		pdeType |= EVirtualSlabTypeCommonVirtual;
	if(aFlags&EMappingCreateDemandPaged)
		pdeType |= EVirtualSlabTypeDemandPaged;

	TInt r;
	TUint colourOffset = aColourOffset&(KPageColourMask<<KPageShift);
	TLinAddr addr;
	TUint size;
	if(aFlags&(EMappingCreateFixedVirtual|EMappingCreateAdoptVirtual))
		{
		// just use the supplied virtual address...
		__NK_ASSERT_ALWAYS(aAddr);
		__NK_ASSERT_ALWAYS(colourOffset==0);
		__NK_ASSERT_DEBUG((aFlags&EMappingCreateAdoptVirtual)==0 || AddressSpace[aOsAsid]->CheckPdeType(aAddr,aSize,pdeType));
		addr = aAddr;
		size = aSize;
		r = KErrNone;
		}
	else
		{
		if(aFlags&(EMappingCreateExactVirtual|EMappingCreateCommonVirtual))
			{
			__NK_ASSERT_ALWAYS(aAddr); // address must be specified
			}
		else
			{
			__NK_ASSERT_ALWAYS(!aAddr); // address shouldn't have been specified
			}

		// adjust for colour...
		TUint allocSize = aSize+colourOffset;
		TUint allocAddr = aAddr;
		if(allocAddr)
			{
			allocAddr -= colourOffset;
			if(allocAddr&(KPageColourMask<<KPageShift))
				return KErrArgument; // wrong colour
			}

		// allocate virtual addresses...
		if(aFlags&EMappingCreateUserGlobalVirtual)
			{
			if(aOsAsid!=(TInt)KKernelOsAsid)
				return KErrArgument;
			r = DAddressSpace::AllocateUserGlobalVirtualMemory(addr,size,allocAddr,allocSize,pdeType);
			}
		else
			r = AddressSpace[aOsAsid]->AllocateVirtualMemory(addr,size,allocAddr,allocSize,pdeType);
		}

	if(r==KErrNone)
		{
		iAllocatedLinAddrAndOsAsid = addr|aOsAsid;
		iAllocatedSize = size;
		}

	TRACE(("DMemoryMapping[0x%08x]::AllocateVirtualMemory returns %d address=0x%08x",this,r,addr));
	return r;
	}
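
// Summary of the create flags handled above (a restatement of the code paths,
// for reference):
//  - EMappingCreateFixedVirtual / EMappingCreateAdoptVirtual: use aAddr exactly
//    as supplied, allocating nothing (for "adopt" the region's existing PDE
//    type must match).
//  - EMappingCreateExactVirtual / EMappingCreateCommonVirtual: allocate, but at
//    the caller-specified address, which must be supplied.
//  - neither: allocate anywhere suitable; the caller must not supply an address.
//  - EMappingCreateUserGlobalVirtual: allocate from the user-global region,
//    which exists only in the kernel's address space (aOsAsid==KKernelOsAsid).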
       


void DMemoryMapping::FreeVirtualMemory()
	{
	if(!iAllocatedSize)
		return; // no virtual memory to free

	TRACE(("DMemoryMapping[0x%08x]::FreeVirtualMemory()",this));

	iLinAddrAndOsAsid = 0;

	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
	AddressSpace[osAsid]->FreeVirtualMemory(addr,iAllocatedSize);
	iAllocatedLinAddrAndOsAsid = 0;
	iAllocatedSize = 0;
	}



//
// DCoarseMapping
//

DCoarseMapping::DCoarseMapping()
	: DMemoryMapping(ECoarseMapping)
	{
	}


DCoarseMapping::DCoarseMapping(TUint aFlags)
	: DMemoryMapping(ECoarseMapping|aFlags)
	{
	}


DCoarseMapping::~DCoarseMapping()
	{
	}


TInt DCoarseMapping::DoMap()
	{
	TRACE(("DCoarseMapping[0x%08x]::DoMap()", this));
	__NK_ASSERT_DEBUG(((iStartIndex|iSizeInPages)&(KChunkMask>>KPageShift))==0); // be extra paranoid about alignment

	MmuLock::Lock();
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base());
	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we're called from code which has added mapping to memory

	TUint flash = 0;
	TUint chunk = iStartIndex >> KPagesInPDEShift;
	TUint endChunk = (iStartIndex + iSizeInPages) >> KPagesInPDEShift;
	TBool sectionMappingsBroken = EFalse;

	while(chunk < endChunk)
		{
		MmuLock::Flash(flash,KMaxPdesInOneGo*2);
		TPte* pt = memory->GetPageTable(PteType(), chunk);
		if(!pt)
			{
			TRACE2(("!PDE %x=%x (was %x)",pPde,KPdeUnallocatedEntry,*pPde));
			__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry);
			}
		else
			{
			TPde pde = Mmu::PageTablePhysAddr(pt)|iBlankPde;
#ifdef __USER_MEMORY_GUARDS_ENABLED__
			if (IsUserMapping())
				pde = PDE_IN_DOMAIN(pde, USER_MEMORY_DOMAIN);
#endif
			TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde));
			if (Mmu::PdeMapsSection(*pPde))
				{
				// break previous section mapping...
				__NK_ASSERT_DEBUG(*pPde==Mmu::PageToSectionEntry(pt[0],iBlankPde));
				sectionMappingsBroken = ETrue;
				}
			else
				__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry || ((*pPde^pde)&~KPdeMatchMask)==0);
			*pPde = pde;
			SinglePdeUpdated(pPde);
			flash += 3; // increase flash rate because we've done quite a bit more work
			}
		++pPde;
		++chunk;
		}
	MmuLock::Unlock();

	if (sectionMappingsBroken)
		{
		// We must invalidate the TLB since we broke section mappings created by the bootstrap.
		// Since this will only ever happen on boot, we just invalidate the entire TLB for this
		// process.
		InvalidateTLBForAsid(OsAsid());
		}

	return KErrNone;
	}
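
// Note (illustrative): a coarse mapping never owns page tables of its own; it
// borrows those belonging to the (coarse) memory object and simply points its
// page directory entries at them, one PDE per chunk. That is why the page-level
// operations below (MapPages/UnmapPages/RemapPage/RestrictPagesNA) are stubs:
// page-level changes are made once in the memory object's own page tables and
// are then seen by every coarse mapping of that object.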
       


void DCoarseMapping::DoUnmap()
	{
	TRACE(("DCoarseMapping[0x%08x]::DoUnmap()", this));
	MmuLock::Lock();
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base());
	TPde* pPdeEnd = pPde+(iSizeInPages>>(KChunkShift-KPageShift));
	TUint flash = 0;
	do
		{
		MmuLock::Flash(flash,KMaxPdesInOneGo);
		TPde pde = KPdeUnallocatedEntry;
		TRACE2(("!PDE %x=%x",pPde,pde));
		*pPde = pde;
		SinglePdeUpdated(pPde);
		++pPde;
		}
	while(pPde<pPdeEnd);
	MmuLock::Unlock();

	InvalidateTLBForAsid(OsAsid());
	}


TInt DCoarseMapping::MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	// shouldn't ever be called because coarse mappings don't have their own page tables...
	__NK_ASSERT_DEBUG(0);
	return KErrNotSupported;
	}


void DCoarseMapping::UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	// shouldn't ever be called because coarse mappings don't have their own page tables...
	__NK_ASSERT_DEBUG(0);
	}


void DCoarseMapping::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)
	{
	// shouldn't ever be called because coarse mappings don't have their own page tables...
	__NK_ASSERT_DEBUG(0);
	}


void DCoarseMapping::RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	// shouldn't ever be called because coarse mappings don't have their own page tables...
	__NK_ASSERT_DEBUG(0);
	}


TInt DCoarseMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	MmuLock::Lock();

	if(!IsAttached())
		{
		MmuLock::Unlock();
		return KErrNotFound;
		}

	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we've checked mapping IsAttached
	return memory->PageIn(this, aPages, aPinArgs, aMapInstanceCount);
	}


TBool DCoarseMapping::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsAttached());

	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we've checked mapping IsAttached
	TBool success = memory->MovingPageIn(this, aPageArrayPtr, aIndex);
	if (success)
		{
		TLinAddr addr = Base() + (aIndex - iStartIndex) * KPageSize;
		InvalidateTLBForPage(addr);
		}
	return success;
	}


TPte* DCoarseMapping::FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	TRACE(("DCoarseMapping::FindPageTable(0x%x, %d)", aLinAddr, aMemoryIndex));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsAttached());
	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we've checked mapping IsAttached
	return memory->FindPageTable(this, aLinAddr, aMemoryIndex);
	}



//
// DFineMapping
//

DFineMapping::DFineMapping()
	: DMemoryMapping(0)
	{
	}


DFineMapping::~DFineMapping()
	{
	TRACE(("DFineMapping[0x%08x]::~DFineMapping()",this));
	FreePermanentPageTables();
	}

#ifdef _DEBUG
void DFineMapping::ValidatePageTable(TPte* aPt, TLinAddr aAddr)
	{
	if(aPt)
		{
		// check page table is correct...
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPt);
		__NK_ASSERT_DEBUG(pti->CheckFine(aAddr&~KChunkMask,OsAsid()));
		DMemoryObject* memory = Memory();
		if(memory)
			{
			if(memory->IsDemandPaged() && !IsPinned() && !(Flags()&EPageTablesAllocated))
				__NK_ASSERT_DEBUG(pti->IsDemandPaged());
			else
				__NK_ASSERT_DEBUG(!pti->IsDemandPaged());
			}
		}
	}
#endif

TPte* DFineMapping::GetPageTable(TLinAddr aAddr)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	// get address of PDE which refers to the page table...
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),aAddr);

	// get page table...
	TPte* pt = Mmu::PageTableFromPde(*pPde);
#ifdef _DEBUG
	ValidatePageTable(pt, aAddr);
#endif
	return pt;
	}


TPte* DFineMapping::GetOrAllocatePageTable(TLinAddr aAddr)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	// get address of PDE which refers to the page table...
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),aAddr);

	// get page table...
	TPte* pt = Mmu::PageTableFromPde(*pPde);
	if(!pt)
		{
		pt = AllocatePageTable(aAddr,pPde);
#ifdef _DEBUG
		ValidatePageTable(pt, aAddr);
#endif
		}

	return pt;
	}


TPte* DFineMapping::GetOrAllocatePageTable(TLinAddr aAddr, TPinArgs& aPinArgs)
	{
	__NK_ASSERT_DEBUG(aPinArgs.iPinnedPageTables);

	if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable))
		return 0;

	TPte* pinnedPt = 0;
	for(;;)
		{
		TPte* pt = GetOrAllocatePageTable(aAddr);

		if(pinnedPt && pinnedPt!=pt)
			{
			// previously pinned page table not needed...
			PageTableAllocator::UnpinPageTable(pinnedPt,aPinArgs);

			// make sure we have memory for next pin attempt...
			MmuLock::Unlock();
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);
			MmuLock::Lock();
			if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable)) // if out of memory...
				{
				// make sure we free any unneeded page table we allocated...
				if(pt)
					FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),aAddr));
				return 0;
				}
			}

		if(!pt)
			return 0; // out of memory

		if(pt==pinnedPt)
			{
			// we got a page table and it was pinned...
			*aPinArgs.iPinnedPageTables++ = pt;
			++aPinArgs.iNumPinnedPageTables;
			return pt;
			}

		// don't pin page table if it's not paged (e.g. unpaged part of ROM)...
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(!pti->IsDemandPaged())
			return pt;

		// pin the page table...
		pinnedPt = pt;
		PageTableAllocator::PinPageTable(pinnedPt,aPinArgs);
		}
	}
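
// Note (illustrative): the loop above is an optimistic retry. Pinning a
// demand-paged page table may release MmuLock internally, and while the lock is
// dropped the PDE can change (the table may be freed or replaced). So the code
// pins, calls GetOrAllocatePageTable again, and only returns once the table it
// gets back is the one already pinned; a previously pinned but now stale table
// is unpinned, and replacement pages are re-reserved, before the next attempt.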
       


TInt DFineMapping::AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset)
	{
	TInt r = DMemoryMapping::AllocateVirtualMemory(aFlags,aOsAsid,aAddr,aSize,aColourOffset);
	if(r==KErrNone && (Flags()&EPermanentPageTables))
		{
		r = AllocatePermanentPageTables();
		if(r!=KErrNone)
			FreeVirtualMemory();
		}
	return r;
	}


void DFineMapping::FreeVirtualMemory()
	{
	FreePermanentPageTables();
	DMemoryMapping::FreeVirtualMemory();
	}


TPte* DFineMapping::AllocatePageTable(TLinAddr aAddr, TPde* aPdeAddress, TBool aPermanent)
	{
	TRACE2(("DFineMapping[0x%08x]::AllocatePageTable(0x%08x,0x%08x,%d)",this,aAddr,aPdeAddress,aPermanent));

	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	for(;;)
		{
		// mapping is going, so we don't need a page table any more...
		if(BeingDetached())
			return 0;

		// get paged state...
		TBool demandPaged = false;
		if(!aPermanent)
			{
			DMemoryObject* memory = Memory();
			__NK_ASSERT_DEBUG(memory); // can't be NULL because not BeingDetached()
			demandPaged = memory->IsDemandPaged();
			}

		// get page table...
		TPte* pt = Mmu::PageTableFromPde(*aPdeAddress);
		if(pt!=0)
			{
			// we have a page table...
			__NK_ASSERT_DEBUG(SPageTableInfo::FromPtPtr(pt)->CheckFine(aAddr&~KChunkMask,iAllocatedLinAddrAndOsAsid&KPageMask));
			if(aPermanent)
				{
				__NK_ASSERT_DEBUG(BeingDetached()==false);
				__NK_ASSERT_ALWAYS(!demandPaged);
				SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
				pti->IncPermanenceCount();
				}
			return pt;
			}

		// allocate a new page table...
		MmuLock::Unlock();
		::PageTables.Lock();
		TPte* newPt = ::PageTables.Alloc(demandPaged);
		if(!newPt)
			{
			// out of memory...
			::PageTables.Unlock();
			MmuLock::Lock();
			return 0;
			}

		// check if new page table is still needed...
		MmuLock::Lock();
		pt = Mmu::PageTableFromPde(*aPdeAddress);
		if(pt)
			{
			// someone else has already allocated a page table,
			// so free the one we just allocated and try again...
			MmuLock::Unlock();
			::PageTables.Free(newPt);
			}
		else if(BeingDetached())
			{
			// mapping is going, so we don't need a page table any more...
			MmuLock::Unlock();
			::PageTables.Free(newPt);
			::PageTables.Unlock();
			MmuLock::Lock();
			return 0;
			}
		else
			{
			// setup new page table...
			SPageTableInfo* pti = SPageTableInfo::FromPtPtr(newPt);
			pti->SetFine(aAddr&~KChunkMask,iAllocatedLinAddrAndOsAsid&KPageMask);

			TPde pde = Mmu::PageTablePhysAddr(newPt)|iBlankPde;
#ifdef __USER_MEMORY_GUARDS_ENABLED__
			if (IsUserMapping())
				pde = PDE_IN_DOMAIN(pde, USER_MEMORY_DOMAIN);
#endif
			TRACE2(("!PDE %x=%x",aPdeAddress,pde));
			__NK_ASSERT_DEBUG(((*aPdeAddress^pde)&~KPdeMatchMask)==0 || *aPdeAddress==KPdeUnallocatedEntry);
			*aPdeAddress = pde;
			SinglePdeUpdated(aPdeAddress);

			MmuLock::Unlock();
			}

		// loop back and recheck...
		::PageTables.Unlock();
		MmuLock::Lock();
		}
	}
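
// Note on lock ordering (illustrative): the page table allocator lock
// (::PageTables) is taken with MmuLock released, so AllocatePageTable drops
// MmuLock before allocating. Because another thread may populate the PDE, or
// the mapping may start detaching, while MmuLock is dropped, the function
// re-takes MmuLock and re-checks both conditions before publishing the new
// table, freeing it again if it lost the race.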
       


void DFineMapping::FreePageTable(TPde* aPdeAddress)
	{
	TRACE2(("DFineMapping[0x%08x]::FreePageTable(0x%08x)",this,aPdeAddress));

	// get page table lock...
	::PageTables.Lock();
	MmuLock::Lock();

	// find page table...
	TPte* pt = Mmu::PageTableFromPde(*aPdeAddress);
	if(pt)
		{
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(pti->PageCount() || pti->PermanenceCount())
			{
			// page table still in use, so don't free it...
			pt = 0;
			}
		else
			{
			// page table not used, so unmap it...
			TPde pde = KPdeUnallocatedEntry;
			TRACE2(("!PDE %x=%x",aPdeAddress,pde));
			*aPdeAddress = pde;
			SinglePdeUpdated(aPdeAddress);
			}
		}

	MmuLock::Unlock();
	if(pt)
		::PageTables.Free(pt);
	::PageTables.Unlock();
	}


void DFineMapping::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)
	{
	TRACE2(("DFineMapping[0x%08x]::RemapPage(0x%x,0x%x,%d,%d)",this,aPageArray,aIndex,aMapInstanceCount,aInvalidateTLB));

	__NK_ASSERT_DEBUG(aIndex >= iStartIndex);
	__NK_ASSERT_DEBUG(aIndex < iStartIndex + iSizeInPages);

	TLinAddr addr = Base() + ((aIndex - iStartIndex) << KPageShift);
	TUint pteIndex = (addr >> KPageShift) & (KChunkMask >> KPageShift);

	// get address of page table...
	MmuLock::Lock();
	TPte* pPte = GetPageTable(addr);

	// check the page is still mapped, and that the mapping isn't being detached
	// or hasn't been reused for another purpose...
	if(!pPte || BeingDetached() || aMapInstanceCount != MapInstanceCount())
		{
		// can't map pages to this mapping any more so just exit.
		MmuLock::Unlock();
		return;
		}

	// remap the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArray, iBlankPte);
	MmuLock::Unlock();

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	// clean TLB...
	if (aInvalidateTLB)
		{
		InvalidateTLBForPage(addr + OsAsid());
		}
#endif
	}


TInt DFineMapping::MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	TRACE2(("DFineMapping[0x%08x]::MapPages(?,%d) index=0x%x count=0x%x",this,aMapInstanceCount,aPages.Index(),aPages.Count()));

	__NK_ASSERT_DEBUG(aPages.Count());
	__NK_ASSERT_DEBUG(aPages.Index()>=iStartIndex);
	__NK_ASSERT_DEBUG(aPages.IndexEnd()-iStartIndex<=iSizeInPages);

	TLinAddr addr = Base()+(aPages.Index()-iStartIndex)*KPageSize;
	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of page table...
		MmuLock::Lock();
		TPte* pPte = GetOrAllocatePageTable(addr);

		// check mapping isn't being unmapped, or been reused for another purpose...
		if(BeingDetached() || aMapInstanceCount!=MapInstanceCount())
			{
			// can't map pages to this mapping any more, so free any page table
			// we just got (if it's not used)...
			if(!pPte)
				MmuLock::Unlock();
			else
				{
				SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
				TBool keepPt = pti->PermanenceCount() || pti->PageCount();
				MmuLock::Unlock();
				if(!keepPt)
					FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),addr));
				}
			// then end...
			return KErrNone;
			}

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			return KErrNoMemory;
			}

		// map some pages...
		pPte += pteIndex;
		TBool keepPt = Mmu::MapPages(pPte, n, pages, iBlankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),addr));

		// move on...
		aPages.Skip(n);
		addr += n*KPageSize;
		}

	return KErrNone;
	}
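
// Note (illustrative): MapPages above, and UnmapPages, RestrictPagesNA and
// PageIn below, all walk the page array with the same batching pattern. Each
// step processes at most
//
//		min(pages left in the current page table, KMaxPagesInOneGo)
//
// pages, so a batch never crosses a page table (chunk) boundary and MmuLock is
// never held across an unbounded number of PTE updates.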
       


void DFineMapping::UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	TRACE2(("DFineMapping[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,aMapInstanceCount,aPages.Index(),aPages.Count()));

	__NK_ASSERT_DEBUG(aPages.Count());

	TLinAddr addr = Base()+(aPages.Index()-iStartIndex)*KPageSize;
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = addr;
#endif
	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		MmuLock::Lock();

		// check that mapping hasn't been reused for another purpose...
		if(aMapInstanceCount!=MapInstanceCount())
			{
			MmuLock::Unlock();
			break;
			}

		// get address of PTE for pages...
		TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr);
		TPte* pPte = Mmu::PageTableFromPde(*pPde);
		if(pPte)
			{
			// unmap some pages...
			pPte += pteIndex;
			TBool keepPt = Mmu::UnmapPages(pPte,n,pages);
			MmuLock::Unlock();

			// free page table if no longer needed...
			if(!keepPt)
				FreePageTable(pPde);
			}
		else
			{
			// no page table found...
			MmuLock::Unlock();
			}

		// move on...
		aPages.Skip(n);
		addr += n*KPageSize;
		}

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	// clean TLB...
	TLinAddr endAddr = addr;
	addr = startAddr+OsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif
	}
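
// Note (illustrative): in the TLB maintenance above, the OS ASID is added into
// the otherwise page-aligned address. Page alignment leaves the low bits free,
// and InvalidateTLBForPage expects the ASID encoded there so that a
// single-entry, ASID-tagged invalidate can be issued; this is the same
// address+ASID packing convention used by iLinAddrAndOsAsid.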
       


void DFineMapping::RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	TRACE2(("DFineMapping[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aMapInstanceCount,aPages.Index(),aPages.Count()));

	__NK_ASSERT_DEBUG(aPages.Count());

	TLinAddr addr = Base()+(aPages.Index()-iStartIndex)*KPageSize;
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = addr;
#endif
	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		MmuLock::Lock();

		// check that mapping hasn't been reused for another purpose...
		if(aMapInstanceCount!=MapInstanceCount())
			{
			MmuLock::Unlock();
			break;
			}

		// get address of PTE for pages...
		TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr);
		TPte* pPte = Mmu::PageTableFromPde(*pPde);
		if(pPte)
			{
			// restrict some pages...
			pPte += pteIndex;
			Mmu::RestrictPagesNA(pPte,n,pages);
			}
		MmuLock::Unlock();

		// move on...
		aPages.Skip(n);
		addr += n*KPageSize;
		}

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	// clean TLB...
	TLinAddr endAddr = addr;
	addr = startAddr+OsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif
	}


TInt DFineMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	TRACE2(("DFineMapping[0x%08x]::PageIn(?,?,%d) index=0x%x count=0x%x",this,aMapInstanceCount,aPages.Index(),aPages.Count()));

	__NK_ASSERT_DEBUG(aPages.Count());
	__NK_ASSERT_DEBUG(aPages.Index()>=iStartIndex);
	__NK_ASSERT_DEBUG(aPages.IndexEnd()-iStartIndex<=iSizeInPages);

	TInt r = KErrNone;

	TLinAddr addr = Base()+(aPages.Index()-iStartIndex)*KPageSize;
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = addr;
#endif
	TBool pinPageTable = aPinArgs.iPinnedPageTables!=0; // check if we need to pin the first page table
	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);
		if(pteIndex==0)
			pinPageTable = aPinArgs.iPinnedPageTables!=0; // started a new page table, check if we need to pin it

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// make sure we have memory to pin the page table if required...
		if(pinPageTable)
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);

		// get address of page table...
		MmuLock::Lock();
		TPte* pPte;
		if(pinPageTable)
			pPte = GetOrAllocatePageTable(addr,aPinArgs);
		else
			pPte = GetOrAllocatePageTable(addr);

		// check mapping isn't being unmapped or hasn't been reused...
		if(BeingDetached() || aMapInstanceCount != MapInstanceCount())
			{
			// can't map pages to this mapping any more, so free any page table
			// we just got (if it's not used)...
			if(!pPte)
				MmuLock::Unlock();
			else
				{
				SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
				TBool keepPt = pti->PermanenceCount() || pti->PageCount();
				MmuLock::Unlock();
				if(!keepPt)
					FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),addr));
				}
			// then end...
			r = KErrNotFound;
			break;
			}

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			r = KErrNoMemory;
			break;
			}

		// map some pages...
		pPte += pteIndex;
		TPte blankPte = iBlankPte;
		if(aPinArgs.iReadOnly)
			blankPte = Mmu::MakePteInaccessible(blankPte,true);
		TBool keepPt = Mmu::PageInPages(pPte, n, pages, blankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),addr));

		// move on...
		aPages.Skip(n);
		addr += n*KPageSize;
		pinPageTable = false;
		}

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	// clean TLB...
	TLinAddr endAddr = addr;
	addr = startAddr+OsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif
	return r;
	}


TBool DFineMapping::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsAttached());
	__NK_ASSERT_DEBUG(!BeingDetached());

	TLinAddr addr = Base() + (aIndex - iStartIndex) * KPageSize;
	TUint pteIndex = (addr >> KPageShift) & (KChunkMask >> KPageShift);

	// get address of page table...
	TPte* pPte = GetPageTable(addr);

	// check the page is still mapped...
	if (!pPte)
		return EFalse;

	// remap the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArrayPtr, iBlankPte);
	InvalidateTLBForPage(addr);
	return ETrue;
	}
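
// Note (illustrative): MovingPageIn is called when a physical page is being
// moved, e.g. by RAM defragmentation (an assumption; the caller is not shown in
// this file). The PTE is rewritten to point at the replacement physical page
// and the single TLB entry is invalidated immediately, all under MmuLock, so no
// thread can keep using the old physical address through this mapping.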
       


TInt DFineMapping::DoMap()
	{
	TRACE(("DFineMapping[0x%08x]::DoMap()", this));
	DMemoryObject* memory = Memory(true); // safe because we're called from code which has added mapping to memory
	if(memory->IsDemandPaged())
		{
		// do nothing, allow pages to be mapped on demand...
		return KErrNone;
		}

	RPageArray::TIter pageIter;
	memory->iPages.FindStart(iStartIndex,iSizeInPages,pageIter);

	// map pages...
	TInt r = KErrNone;
	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.Find(pageList);
		if(!n)
			break; // done

		// map some pages...
		r = MapPages(pageList,MapInstanceCount());

		// done with pages...
		pageIter.FindRelease(n);

		if(r!=KErrNone)
			break;
		}

	memory->iPages.FindEnd(iStartIndex,iSizeInPages);
	return r;
	}


void DFineMapping::DoUnmap()
	{
	TRACE2(("DFineMapping[0x%08x]::DoUnmap()",this));

	TLinAddr startAddr = Base();
	TUint count = iSizeInPages;
	TLinAddr addr = startAddr;
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr);

	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);

		// calculate number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>count)
			n = count;

		// get page table...
		MmuLock::Lock();
		TPte* pPte = Mmu::PageTableFromPde(*pPde);
		if(!pPte)
			{
			// no page table found, so nothing to do...
			MmuLock::Unlock();
			}
		else
			{
			// unmap some pages...
			pPte += pteIndex;
			if(n>KMaxPagesInOneGo)
				n = KMaxPagesInOneGo;
			TBool keepPt = Mmu::UnmapPages(pPte, n);
			MmuLock::Unlock();

			// free page table if no longer needed...
			if(!keepPt)
				FreePageTable(pPde);
			}

		// move on...
		addr += n*KPageSize;
		count -= n;
		if(!count)
			break;
		if(!(addr&KChunkMask))
			++pPde;
		}

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	InvalidateTLBForAsid(OsAsid());
#else
	// clean TLB...
	TLinAddr endAddr = addr;
	addr = LinAddrAndOsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif
	}


TInt DFineMapping::AllocatePermanentPageTables()
	{
	TRACE2(("DFineMapping[0x%08x]::AllocatePermanentPageTables()",this));
	__NK_ASSERT_DEBUG((Flags()&EPageTablesAllocated)==0);
	__NK_ASSERT_DEBUG(iBlankPde);

	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
	TPde* pStartPde = Mmu::PageDirectoryEntry(osAsid,addr);
	TPde* pEndPde = Mmu::PageDirectoryEntry(osAsid,addr+iAllocatedSize-1);
	TPde* pPde = pStartPde;

	while(pPde<=pEndPde)
		{
		MmuLock::Lock();
		TPte* pPte = AllocatePageTable(addr,pPde,true);
		if(!pPte)
			{
			// out of memory...
			MmuLock::Unlock();
			FreePermanentPageTables(pStartPde,pPde-1);
			return KErrNoMemory;
			}
		MmuLock::Unlock();

		addr += KChunkSize;
		++pPde;
		}

	TRACE2(("DFineMapping[0x%08x]::AllocatePermanentPageTables() done",this));
	Flags() |= DMemoryMapping::EPageTablesAllocated;
	return KErrNone;
	}


void DFineMapping::FreePermanentPageTables(TPde* aFirstPde, TPde* aLastPde)
	{
	Flags() &= ~DMemoryMapping::EPageTablesAllocated;

	MmuLock::Lock();

	TUint flash = 0;
	TPde* pPde = aFirstPde;
	while(pPde<=aLastPde)
		{
		TPte* pPte = Mmu::PageTableFromPde(*pPde);
		__NK_ASSERT_DEBUG(pPte);
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
		if(pti->DecPermanenceCount() || pti->PageCount())
			{
			// still in use...
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo*2);
			}
		else
			{
			// page table no longer used for anything...
			MmuLock::Unlock();
			FreePageTable(pPde);
			MmuLock::Lock();
			}

		++pPde;
		}

	MmuLock::Unlock();
	}


void DFineMapping::FreePermanentPageTables()
	{
	if((Flags()&EPageTablesAllocated)==0)
		return;

	TRACE2(("DFineMapping[0x%08x]::FreePermanentPageTables()",this));

	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
	TPde* pPde = Mmu::PageDirectoryEntry(osAsid,addr);
	TPde* pEndPde = Mmu::PageDirectoryEntry(osAsid,addr+iAllocatedSize-1);
	FreePermanentPageTables(pPde,pEndPde);
	}


TPte* DFineMapping::FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	TRACE(("DFineMapping::FindPageTable(0x%x, %d)", aLinAddr, aMemoryIndex));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsAttached());
	return GetPageTable(aLinAddr);
	}



//
// DPhysicalPinMapping
//

DPhysicalPinMapping::DPhysicalPinMapping()
	: DMemoryMappingBase(EPinned|EPhysicalPinningMapping)
	{
	}


TInt DPhysicalPinMapping::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
	{
	__NK_ASSERT_ALWAYS(IsAttached());

	__NK_ASSERT_ALWAYS(TUint(aIndex+aCount)>aIndex && TUint(aIndex+aCount)<=iSizeInPages);
	aIndex += iStartIndex;

	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we should only be called whilst memory is Pinned
	TInt r = memory->PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
	if(r!=KErrNone)
		return r;

	if(memory->IsDemandPaged() && !IsReadOnly())
		{
		// the memory is demand paged and writeable so we need to mark it as dirty
		// as we have to assume that the memory will be modified via the physical
		// addresses we return...
		MmuLock::Lock();
		TPhysAddr* pages = aPhysicalPageList;
		TUint count = aCount;
		while(count)
			{
			SPageInfo* pi = SPageInfo::FromPhysAddr(*(pages++));
			pi->SetDirty();
			if((count&(KMaxPageInfoUpdatesInOneGo-1))==0)
				MmuLock::Flash(); // flash lock every KMaxPageInfoUpdatesInOneGo iterations of the loop
			--count;
			}
		MmuLock::Unlock();
		}

	return KErrNone;
	}


TInt DPhysicalPinMapping::Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions)
	{
	PteType() = Mmu::PteType(aPermissions,true);
	return Attach(aMemory,aIndex,aCount);
	}


void DPhysicalPinMapping::Unpin()
	{
	Detach();
	}


TInt DPhysicalPinMapping::MapPages(RPageArray::TIter /*aPages*/, TUint /*aMapInstanceCount*/)
	{
	// shouldn't ever be called because these mappings are always pinned...
	__NK_ASSERT_DEBUG(0);
	return KErrNotSupported;
	}


void DPhysicalPinMapping::UnmapPages(RPageArray::TIter /*aPages*/, TUint /*aMapInstanceCount*/)
	{
	// nothing to do...
	}


void DPhysicalPinMapping::RemapPage(TPhysAddr& /*aPageArrayPtr*/, TUint /*aIndex*/, TUint /*aMapInstanceCount*/, TBool /*aInvalidateTLB*/)
	{
	// shouldn't ever be called because physically pinned mappings block page moving.
	__NK_ASSERT_DEBUG(0);
	}


void DPhysicalPinMapping::RestrictPagesNA(RPageArray::TIter /*aPages*/, TUint /*aMapInstanceCount*/)
	{
	// nothing to do...
	}


TInt DPhysicalPinMapping::PageIn(RPageArray::TIter /*aPages*/, TPinArgs& /*aPinArgs*/, TUint /*aMapInstanceCount*/)
	{
	// nothing to do...
	return KErrNone;
	}


TInt DPhysicalPinMapping::MovingPageIn(TPhysAddr& /*aPageArrayPtr*/, TUint /*aIndex*/)
	{
	// should never be asked to page in a page that is being moved as physical
	// pin mappings don't own any page tables.
	__NK_ASSERT_DEBUG(0);
	return KErrAbort;
	}


TInt DPhysicalPinMapping::DoMap()
	{
	// nothing to do...
	return KErrNone;
	}


void DPhysicalPinMapping::DoUnmap()
	{
	// nothing to do...
	}



//
// DVirtualPinMapping
//

DVirtualPinMapping::DVirtualPinMapping()
	: iMaxCount(0)
	{
	// Clear flag so it is possible to distinguish between virtual and physical pin mappings.
	Flags() &= ~EPhysicalPinningMapping;
	}


DVirtualPinMapping::~DVirtualPinMapping()
	{
	TRACE(("DVirtualPinMapping[0x%08x]::~DVirtualPinMapping()",this));
	FreePageTableArray();
	}


DVirtualPinMapping* DVirtualPinMapping::New(TUint aMaxCount)
	{
	TRACE(("DVirtualPinMapping::New(0x%x)",aMaxCount));
	DVirtualPinMapping* self = new DVirtualPinMapping;
	if(aMaxCount)
		{
		// pages have been reserved for our use.

		// Create the array for storing pinned page tables now, so we
		// don't risk out-of-memory errors trying to do so later...
		if(self->AllocPageTableArray(aMaxCount)!=KErrNone)
			{
			// failed, so cleanup...
			self->Close();
			self = 0;
			}
		else
			{
			// success, so remember the pages that have been reserved for us...
			self->iMaxCount = aMaxCount;
			self->Flags() |= EPinningPagesReserved;
			}
		}
	TRACE(("DVirtualPinMapping::New(0x%x) returns 0x%08x",aMaxCount,self));
	return self;
	}
  1412 
       
  1413 
       
  1414 TUint DVirtualPinMapping::MaxPageTables(TUint aPageCount)
       
  1415 	{
       
  1416 	return (aPageCount+2*KChunkSize/KPageSize-2)>>(KChunkShift-KPageShift);
       
  1417 	}
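
// A worked illustration of MaxPageTables() (not part of the original source),
// assuming the usual ARM values KChunkShift==20 and KPageShift==12, i.e. 1MB
// chunks and 4KB pages, so one page table covers KChunkSize/KPageSize == 256
// pages. In the worst case the region starts on the last page covered by a
// page table, so it can span ceil((aPageCount+255)/256) page tables, which is
// exactly what (aPageCount + 2*256 - 2) >> 8 computes:
//
//	aPageCount == 1   -> (1+510)>>8   == 1 page table
//	aPageCount == 2   -> (2+510)>>8   == 2 page tables (worst case: the two
//	                                     pages straddle a chunk boundary)
//	aPageCount == 257 -> (257+510)>>8 == 2 page tables
//	aPageCount == 258 -> (258+510)>>8 == 3 page tables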
       


TInt DVirtualPinMapping::AllocPageTableArray(TUint aCount)
	{
	__NK_ASSERT_ALWAYS(iAllocatedPinnedPageTables==0);
	TUint maxPt = MaxPageTables(aCount);
	if(maxPt>KSmallPinnedPageTableCount)
		{
		iAllocatedPinnedPageTables = new TPte*[maxPt];
		if(!iAllocatedPinnedPageTables)
			return KErrNoMemory;
		}
	return KErrNone;
	}


void DVirtualPinMapping::FreePageTableArray()
	{
	delete [] iAllocatedPinnedPageTables;
	iAllocatedPinnedPageTables = 0;
	}


TPte** DVirtualPinMapping::PageTableArray()
	{
	return iAllocatedPinnedPageTables ? iAllocatedPinnedPageTables : iSmallPinnedPageTablesArray;
	}
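
// The pinned page-table pointers live in the small inline array
// iSmallPinnedPageTablesArray unless more than KSmallPinnedPageTableCount
// page tables may be needed, in which case AllocPageTableArray() switches to
// a heap-allocated array; PageTableArray() hides that choice from the rest
// of the code.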
       


TInt DVirtualPinMapping::Pin(	DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions,
								DMemoryMappingBase* aMapping, TUint aMappingInstanceCount)
	{
	// Virtual pinning ensures a page is always mapped to a particular virtual address
	// and therefore requires a non-pinning mapping of the virtual address to pin.
	__NK_ASSERT_ALWAYS(aMapping && !aMapping->IsPinned());

	if(iMaxCount)
		{
		if(aCount>iMaxCount)
			return KErrArgument;
		}
	else
		{
		TInt r = AllocPageTableArray(aCount);
		if(r!=KErrNone)
			return r;
		}

	iPinVirtualMapping = aMapping;
	iPinVirtualMapInstanceCount = aMappingInstanceCount;
	TInt r = DPhysicalPinMapping::Pin(aMemory,aIndex,aCount,aPermissions);
	iPinVirtualMapping = 0;

	return r;
	}
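
// A minimal usage sketch (not from the original source): pinning aPageCount
// pages of a memory object that is already mapped by a non-pinning mapping.
// The names 'memory', 'userMapping', 'instanceCount' and 'permissions'
// (a caller-supplied TMappingPermissions value) are assumptions for
// illustration, and error handling is abbreviated:
//
//	DVirtualPinMapping* pin = DVirtualPinMapping::New(aPageCount);
//	if(!pin)
//		return KErrNoMemory;
//	TInt r = pin->Pin(memory, 0, aPageCount, permissions,
//					  userMapping, instanceCount);
//	if(r==KErrNone)
//		{
//		// pages now stay resident and mapped at their virtual address...
//		pin->Unpin();
//		}
//	pin->Close();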
       


void DVirtualPinMapping::Unpin()
	{
	Detach();
	}


void DVirtualPinMapping::UnpinPageTables(TPinArgs& aPinArgs)
	{
	TPte** pPt = PageTableArray();
	TPte** pPtEnd = pPt+iNumPinnedPageTables;

	MmuLock::Lock();
	while(pPt<pPtEnd)
		PageTableAllocator::UnpinPageTable(*pPt++,aPinArgs);
	MmuLock::Unlock();
	iNumPinnedPageTables = 0;

	if(!iMaxCount)
		FreePageTableArray();
	}


void DVirtualPinMapping::RemapPage(TPhysAddr& /*aPageArrayPtr*/, TUint /*aIndex*/, TUint /*aMapInstanceCount*/, TBool /*aInvalidateTLB*/)
	{
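	// Should never be called because, as with MovingPageIn below, virtual
	// pin mappings don't own any page tables, so there are no PTEs here to remap.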
       
	__NK_ASSERT_DEBUG(0);
	}
       


TInt DVirtualPinMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	if(iPinVirtualMapping)
		return iPinVirtualMapping->PageIn(aPages, aPinArgs, iPinVirtualMapInstanceCount);
	return KErrNone;
	}


TInt DVirtualPinMapping::MovingPageIn(TPhysAddr& /*aPageArrayPtr*/, TUint /*aIndex*/)
	{
	// Should never be asked to page in a page that is being moved as virtual
	// pin mappings don't own any page tables.
	__NK_ASSERT_DEBUG(0);
	return KErrAbort;
	}


TInt DVirtualPinMapping::DoPin(TPinArgs& aPinArgs)
	{
	// setup for page table pinning...
	aPinArgs.iPinnedPageTables = PageTableArray();

	// do pinning...
	TInt r = DPhysicalPinMapping::DoPin(aPinArgs);

	// save results...
	iNumPinnedPageTables = aPinArgs.iNumPinnedPageTables;
	__NK_ASSERT_DEBUG(iNumPinnedPageTables<=MaxPageTables(iSizeInPages));

	// cleanup if error...
	if(r!=KErrNone)
		UnpinPageTables(aPinArgs);

	return r;
	}


void DVirtualPinMapping::DoUnpin(TPinArgs& aPinArgs)
	{
	DPhysicalPinMapping::DoUnpin(aPinArgs);
	UnpinPageTables(aPinArgs);
	}



//
// DMemoryMappingBase
//


DMemoryMappingBase::DMemoryMappingBase(TUint aType)
	{
	Flags() = aType; // rest of members cleared by DBase
	}


TInt DMemoryMappingBase::Attach(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE(("DMemoryMappingBase[0x%08x]::Attach(0x%08x,0x%x,0x%x)",this,aMemory,aIndex,aCount));
	__NK_ASSERT_DEBUG(!IsAttached());
	TInt r;

	if(++iMapInstanceCount>1)
		{// This mapping is being reused...

		// Non-pinned mappings can be reused; however, this is only exercised
		// by aligned shared buffers whose memory is managed by the unpaged
		// or hardware memory manager. Reusing mappings to paged or movable
		// memory hasn't been tested, and would require the interactions of
		// mapping reuse with the fault handler, pinning etc to be tested.
		__NK_ASSERT_DEBUG(	IsPinned() ||
							aMemory->iManager == TheUnpagedMemoryManager ||
							aMemory->iManager == TheHardwareMemoryManager);

		// make sure the new instance count is seen by other threads which may
		// be operating on the old mapping instance (the lock/unlock pair acts
		// as a memory barrier; once they see the new count they will stop
		// changing the mapping)...
		MmuLock::Lock();
		MmuLock::Unlock();
		// clear detaching/veto flags left over from the previous use...
		__e32_atomic_and_ord16(&Flags(), (TUint16)~(EDetaching|EPageUnmapVetoed));
		}

	__NK_ASSERT_DEBUG((Flags()&(EDetaching|EPageUnmapVetoed))==0);

	// set region being mapped...
	iStartIndex = aIndex;
	iSizeInPages = aCount;

	// reserve any pages required for pinning demand paged memory.
	// We must do this before we add the mapping to the memory object
	// because once that is done the pages we are mapping will be prevented
	// from being paged out. That could leave the paging system without
	// enough pages to correctly handle page faults...
	TPinArgs pinArgs;
	pinArgs.iReadOnly = IsReadOnly();
	if(IsPinned() && aMemory->IsDemandPaged())
		{
		pinArgs.iUseReserve = Flags()&EPinningPagesReserved;
		r = pinArgs.AllocReplacementPages(aCount);
		if(r!=KErrNone)
			return r;
		}

	// link into memory object...
	r = aMemory->AddMapping(this);
	if(r==KErrNone)
		{
		// pin pages if needed...
		if(IsPinned())
			r = DoPin(pinArgs);

		// add pages to this mapping...
		if(r==KErrNone)
			r = DoMap();

		// revert if error...
		if(r!=KErrNone)
			Detach();
		}

	// free any left over pinning pages...
	pinArgs.FreeReplacementPages();

	return r;
	}


void DMemoryMappingBase::Detach()
	{
	TRACE(("DMemoryMappingBase[0x%08x]::Detach()",this));
	__NK_ASSERT_DEBUG(IsAttached());

	// set EDetaching flag, which prevents anyone modifying pages in this
	// mapping, except to remove them...
	MmuLock::Lock();
	__e32_atomic_ior_ord16(&Flags(), (TUint16)EDetaching);
	MmuLock::Unlock();

	// remove all pages from this mapping...
	DoUnmap();

	// unpin pages if needed...
	TPinArgs pinArgs;
	if(IsPinned())
		DoUnpin(pinArgs);

	// unlink from memory object...
	iMemory->RemoveMapping(this);

	// free any spare pages produced by unpinning...
	pinArgs.FreeReplacementPages();
	}
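
// In outline, a successful Attach()/Detach() cycle performs:
//
//	Attach: reserve replacement pages (pinned mappings of demand paged memory)
//	        -> aMemory->AddMapping(this)
//	        -> DoPin()  (pinned mappings only)
//	        -> DoMap()
//	Detach: set EDetaching under MmuLock (other threads stop modifying pages)
//	        -> DoUnmap()
//	        -> DoUnpin() (pinned mappings only)
//	        -> iMemory->RemoveMapping(this)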
       


TInt DMemoryMappingBase::DoPin(TPinArgs& aPinArgs)
	{
	DMemoryObject* memory = Memory(true); // safe because we're called from code which has added mapping to memory
	return memory->iManager->Pin(memory,this,aPinArgs);
	}


void DMemoryMappingBase::DoUnpin(TPinArgs& aPinArgs)
	{
	DMemoryObject* memory = Memory(true); // safe because we're called from code which will be removing this mapping from memory afterwards
	memory->iManager->Unpin(memory,this,aPinArgs);
	}


void DMemoryMappingBase::LinkToMemory(DMemoryObject* aMemory, TMappingList& aMappingList)
	{
	TRACE(("DMemoryMappingBase[0x%08x]::LinkToMemory(0x%08x,?)",this,aMemory));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aMappingList.LockIsHeld());
	__NK_ASSERT_ALWAYS(!IsAttached());
	__NK_ASSERT_DEBUG(!BeingDetached());
	aMappingList.Add(this);
	iMemory = aMemory;
	iMemory->SetMappingAddedFlag();
	}


void DMemoryMappingBase::UnlinkFromMemory(TMappingList& aMappingList)
	{
	TRACE(("DMemoryMappingBase[0x%08x]::UnlinkFromMemory(?)",this));

	// unlink...
	MmuLock::Lock();
	aMappingList.Lock();
	__NK_ASSERT_DEBUG(IsAttached());
	__NK_ASSERT_DEBUG(BeingDetached());
	aMappingList.Remove(this);
	DMemoryObject* memory = iMemory;
	iMemory = 0;
	aMappingList.Unlock();
	MmuLock::Unlock();

	// if mapping had vetoed any page decommits...
	if(Flags()&DMemoryMapping::EPageUnmapVetoed)
		{
		// then queue cleanup of decommitted pages...
		memory->iManager->QueueCleanup(memory,DMemoryManager::ECleanupDecommitted);
		}
	}



//
// Debug
//

void DMemoryMappingBase::Dump()
	{
#ifdef _DEBUG
	Kern::Printf("DMemoryMappingBase[0x%08x]::Dump()",this);
	Kern::Printf("  IsAttached() = %d",(bool)IsAttached());
	Kern::Printf("  iMemory = 0x%08x",iMemory);
	Kern::Printf("  iStartIndex = 0x%x",iStartIndex);
	Kern::Printf("  iSizeInPages = 0x%x",iSizeInPages);
	Kern::Printf("  Flags() = 0x%x",Flags());
	Kern::Printf("  PteType() = 0x%x",PteType());
#endif // _DEBUG
	}


void DMemoryMapping::Dump()
	{
#ifdef _DEBUG
	Kern::Printf("DMemoryMapping[0x%08x]::Dump()",this);
	Kern::Printf("  Base() = 0x%08x",iLinAddrAndOsAsid&~KPageMask);
	Kern::Printf("  OsAsid() = %d",iLinAddrAndOsAsid&KPageMask);
	Kern::Printf("  iBlankPde = 0x%08x",iBlankPde);
	Kern::Printf("  iBlankPte = 0x%08x",iBlankPte);
	Kern::Printf("  iAllocatedLinAddrAndOsAsid = 0x%08x",iAllocatedLinAddrAndOsAsid);
	Kern::Printf("  iAllocatedSize = 0x%x",iAllocatedSize);
	DMemoryMappingBase::Dump();
#endif // _DEBUG
	}


void DVirtualPinMapping::Dump()
	{
#ifdef _DEBUG
	Kern::Printf("DVirtualPinMapping[0x%08x]::Dump()",this);
	Kern::Printf("  iMaxCount = %d",iMaxCount);
	Kern::Printf("  iNumPinnedPageTables = %d",iNumPinnedPageTables);
	DMemoryMappingBase::Dump();
#endif // _DEBUG
	}