kernel/eka/memmodel/epoc/flexible/mmu/mlargemappings.cpp
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include "mlargemappings.h"
#include <kernel/cache_maintenance.inl>


//
// DLargeMappedMemory
//
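// DLargeMappedMemory extends DCoarseMemory with a bitmap, iContiguousState,
// holding one bit per chunk that records whether the chunk's pages are
// physically contiguous; DLargeMapping uses this to map contiguous chunks
// with a single section PDE instead of a page table.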


DLargeMappedMemory::DLargeMappedMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: DCoarseMemory(aManager, aSizeInPages, aAttributes, aCreateFlags)
	{
	}


DLargeMappedMemory* DLargeMappedMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	TRACE(("DLargeMappedMemory::New()"));
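	// allocate room for the object plus one contiguity bit per chunk; the
	// first bitmap word is assumed to be an iContiguousState[1] member at the
	// end of the class, hence only (wordCount - 1) extra words are needed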
	TUint chunkCount = (aSizeInPages + KPagesInPDE - 1) >> KPagesInPDEShift;
	TUint wordCount = (chunkCount + 31) >> 5;
	TUint size = sizeof(DLargeMappedMemory) + sizeof(TUint) * (wordCount - 1);
	DLargeMappedMemory* self = (DLargeMappedMemory*)Kern::AllocZ(size);
	if(self)
		{
		new (self) DLargeMappedMemory(aManager, aSizeInPages, aAttributes, aCreateFlags);
		if(self->Construct()!=KErrNone)
			{
			self->Close();
			self = NULL;
			}
		}
	TRACE(("DLargeMappedMemory::New() returns 0x%08x", self));
	return self;
	}


DLargeMappedMemory::~DLargeMappedMemory()
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::~DLargeMappedMemory()",this));
	}


DMemoryMapping* DLargeMappedMemory::CreateMapping(TUint aIndex, TUint aCount)
	{
	TRACE(("DLargeMappedMemory[0x%08x]::CreateMapping()",this));
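	// a large (section-capable) mapping can only be used when both the start
	// index and the page count are chunk-aligned; otherwise fall back to an
	// ordinary fine mapping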
	if (((aIndex|aCount)&(KChunkMask>>KPageShift))==0)
		return new DLargeMapping();
	else
		return new DFineMapping();
	}


TInt DLargeMappedMemory::ClaimInitialPages(TLinAddr aBase,
										   TUint aSize,
										   TMappingPermissions aPermissions,
										   TBool aAllowGaps,
										   TBool aAllowNonRamPages)
	{
	TRACE(("DLargeMappedMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",
		   this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));
	TInt r = DCoarseMemory::ClaimInitialPages(aBase,aSize,aPermissions,aAllowGaps,
											  aAllowNonRamPages);
	if (r != KErrNone)
		return r;

	// set initial contiguous state by checking which chunks were section mapped by the bootstrap
	MmuLock::Lock();
	TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);
	TUint endChunk = aSize >> KChunkShift;
	for (TUint chunk = 0 ; chunk < endChunk ; ++chunk)
		{
		SetChunkContiguous(chunk, Mmu::PdeMapsSection(*pPde++));
		TRACE(("  chunk %d contiguous state is %d", chunk, IsChunkContiguous(chunk)));
		}
	MmuLock::Unlock();

	return KErrNone;
	}


TInt DLargeMappedMemory::MapPages(RPageArray::TIter aPages)
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

	// for now: assert pages do not overlap a contiguous area
	// todo: update contiguous state, update page tables and call MapPages on large mappings
#ifdef _DEBUG
	if (aPages.Count())
		{
		// check every chunk the page range overlaps, including the last one
		TUint firstChunk = aPages.Index() >> KPagesInPDEShift;
		TUint lastChunk = (aPages.IndexEnd() - 1) >> KPagesInPDEShift;
		for (TUint chunk = firstChunk ; chunk <= lastChunk ; ++chunk)
			{
			MmuLock::Lock();
			__NK_ASSERT_DEBUG(!IsChunkContiguous(chunk));
			MmuLock::Unlock();
			}
		}
#endif

	// map pages in all page tables and fine mappings
	return DCoarseMemory::MapPages(aPages);
	}


void DLargeMappedMemory::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::RemapPage() index=0x%x",this, aIndex));

	// update contiguous state...
	// todo: for now, assume that remapping a page makes its chunk non-contiguous
	MmuLock::Lock();
	SetChunkContiguous(aIndex >> KPagesInPDEShift, EFalse);
	MmuLock::Unlock();

	// remap pages in all page tables and call RemapPage on large mappings...
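	// (each DPageTables object is opened before MmuLock is released so that it
	// can't be destroyed while it is being used, then closed asynchronously)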
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			tables->Open();
			MmuLock::Unlock();
			tables->RemapPage(aPageArray, aIndex, aInvalidateTLB);
			tables->iMappings.RemapPage(aPageArray, aIndex, aInvalidateTLB);
			tables->AsyncClose();
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// remap page in all fine mappings...
	DMemoryObject::RemapPage(aPageArray, aIndex, aInvalidateTLB);
	}


void DLargeMappedMemory::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));

	// for now: assert pages do not overlap a contiguous area
	// todo: update contiguous state, update page tables and call UnmapPages on large mappings
#ifdef _DEBUG
	if (aPages.Count())
		{
		// check every chunk the page range overlaps, including the last one
		TUint firstChunk = aPages.Index() >> KPagesInPDEShift;
		TUint lastChunk = (aPages.IndexEnd() - 1) >> KPagesInPDEShift;
		for (TUint chunk = firstChunk ; chunk <= lastChunk ; ++chunk)
			{
			MmuLock::Lock();
			__NK_ASSERT_DEBUG(!IsChunkContiguous(chunk));
			MmuLock::Unlock();
			}
		}
#endif

	// unmap pages in all page tables and fine mappings
	DCoarseMemory::UnmapPages(aPages, aDecommitting);
	}


void DLargeMappedMemory::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));

	// assert pages do not overlap a contiguous area...
#ifdef _DEBUG
	if (aPages.Count())
		{
		// check every chunk the page range overlaps, including the last one
		TUint firstChunk = aPages.Index() >> KPagesInPDEShift;
		TUint lastChunk = (aPages.IndexEnd() - 1) >> KPagesInPDEShift;
		for (TUint chunk = firstChunk ; chunk <= lastChunk ; ++chunk)
			{
			MmuLock::Lock();
			__NK_ASSERT_DEBUG(!IsChunkContiguous(chunk));
			MmuLock::Unlock();
			}
		}
#endif

	DCoarseMemory::RestrictPages(aPages, aRestriction);
	}


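// The contiguity state is a bitmap with one bit per chunk: bit
// (aChunkIndex & 31) of word (aChunkIndex >> 5) of iContiguousState.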
TBool DLargeMappedMemory::IsChunkContiguous(TInt aChunkIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	TUint index = aChunkIndex >> 5;
	TUint mask = 1 << (aChunkIndex & 31);
	return (iContiguousState[index] & mask) != 0;
	}


void DLargeMappedMemory::SetChunkContiguous(TInt aChunkIndex, TBool aIsContiguous)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	TUint index = aChunkIndex >> 5;
	TUint mask = 1 << (aChunkIndex & 31);
	iContiguousState[index] = (iContiguousState[index] & ~mask) | (aIsContiguous ? mask : 0);
	}


//
// DLargeMapping
//
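// DLargeMapping is a coarse mapping that maps chunks reported as contiguous
// by DLargeMappedMemory with section PDEs and uses page table PDEs elsewhere;
// RemapPage breaks a section mapping back into a page table mapping when one
// of its pages is remapped.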


DLargeMapping::DLargeMapping() : DCoarseMapping(ELargeMapping)
	{
	}


TInt DLargeMapping::DoMap()
	{
	TRACE(("DLargeMapping[0x%08x]::DoMap()", this));
	__NK_ASSERT_DEBUG(((iStartIndex|iSizeInPages)&(KChunkMask>>KPageShift))==0); // be extra paranoid about alignment

	MmuLock::Lock();

	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base());
	DLargeMappedMemory* memory = (DLargeMappedMemory*)Memory(ETrue); // safe because we're called from code which has added the mapping to the memory object

	TUint flash = 0;
	TUint chunk = iStartIndex >> KPagesInPDEShift;
	TUint endChunk = (iStartIndex + iSizeInPages) >> KPagesInPDEShift;

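	// for each chunk covered by the mapping write either a section PDE (when
	// the memory's chunk is contiguous), a page table PDE (when a page table
	// exists), or leave the PDE unallocated...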
	while(chunk < endChunk)
		{
		MmuLock::Flash(flash,KMaxPdesInOneGo*2);
		TPde pde = KPdeUnallocatedEntry;
		TPte* pt = memory->GetPageTable(PteType(), chunk);
		if (memory->IsChunkContiguous(chunk))
			pde = Mmu::PageToSectionEntry(pt[0],iBlankPde); // todo: use get phys addr?
		else if (pt)
			pde = Mmu::PageTablePhysAddr(pt)|iBlankPde;

		if (pde == KPdeUnallocatedEntry)
			{
			TRACE2(("!PDE %x=%x (was %x)",pPde,KPdeUnallocatedEntry,*pPde));
			__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry);
			}
		else
			{
			TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde));
			__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry || ((*pPde^pde)&~KPdeMatchMask)==0);
			*pPde = pde;
			SinglePdeUpdated(pPde);
			flash += 3; // increase flash rate because we've done quite a bit more work
			}

		++pPde;
		++chunk;
		}
	MmuLock::Unlock();

	return KErrNone;
	}


void DLargeMapping::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)
	{
	TRACE(("DLargeMapping[0x%08x]::RemapPage(%08x, %d, %d, %d)", this, aPageArray, aIndex, aMapInstanceCount, aInvalidateTLB));

	TInt chunkIndex = aIndex >> KPagesInPDEShift;

	MmuLock::Lock();
	DLargeMappedMemory* memory = (DLargeMappedMemory*)Memory(); // safe because we're called from code that holds a reference on the page tables, which in turn hold a reference on the memory
	TPte* pt = memory->GetPageTable(PteType(), chunkIndex);

	// check the page is still mapped and the mapping isn't being detached
	// or hasn't been reused for another purpose...
	if(!pt || BeingDetached() || aMapInstanceCount != MapInstanceCount())
		{
		// can't map pages to this mapping any more so just exit.
		TRACE(("  page no longer mapped"));
		MmuLock::Unlock();
		return;
		}

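	// three cases follow: the chunk is section-mapped but no longer
	// contiguous, so the section must be broken up into a page table mapping;
	// it is page table mapped but now contiguous, so the section could be
	// reformed (not yet implemented); or neither, in which case the page
	// table update in DPageTables has already done the work...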
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base() + (chunkIndex << KChunkShift));
	TPde currentPde = *pPde;

	if (!memory->IsChunkContiguous(chunkIndex) && Mmu::PdeMapsSection(currentPde))
		{
		// break section mapping and replace with page table...
		TRACE2(("  breaking section mapping"));
		TPde pde = Mmu::PageTablePhysAddr(pt)|iBlankPde;
		TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde));
		// can't assert old value if the first page has been remapped
		__NK_ASSERT_DEBUG((aIndex & (KPagesInPDE - 1)) == 0 ||
						  *pPde == Mmu::PageToSectionEntry(pt[0],iBlankPde));
		*pPde = pde;
		SinglePdeUpdated(pPde);
		MmuLock::Unlock();
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
		if (aInvalidateTLB)
			{
			// invalidate every page in the chunk...
			TUint start = (chunkIndex << KPagesInPDEShift) - iStartIndex;
			TLinAddr addr = LinAddrAndOsAsid() + (start << KPageShift);
			TLinAddr endAddr = addr + KChunkSize;
			do InvalidateTLBForPage(addr);
			while((addr+=KPageSize)<endAddr);
			}
#endif
		}
	else if (memory->IsChunkContiguous(chunkIndex) && Mmu::PdeMapsPageTable(currentPde))
		{
		// reform section mapping...
		TRACE2(("  reforming section mapping"));
		__NK_ASSERT_ALWAYS(0); // todo: not yet implemented
		}
	else
		{
		// remap already handled by page table update in DPageTables...
		MmuLock::Unlock();
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
		if (aInvalidateTLB)
			{
			// invalidate page...
			TUint start = aIndex - iStartIndex;
			TLinAddr addr = LinAddrAndOsAsid() + (start << KPageShift);
			InvalidateTLBForPage(addr);
			}
#endif
		}
	}


TInt DLargeMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	TRACE(("DLargeMapping[0x%08x]::PageIn(%d, %d, ?, %d)", this, aPages.Index(), aPages.Count(), aMapInstanceCount));
#ifdef _DEBUG
	// assert that we're not trying to page in any section mapped pages,
	// checking every chunk the page range overlaps, including the last one
	if (aPages.Count())
		{
		TUint firstChunk = (aPages.Index() - iStartIndex) >> KPagesInPDEShift;
		TUint lastChunk = (aPages.Index() + aPages.Count() - iStartIndex - 1) >> KPagesInPDEShift;
		for (TUint chunk = firstChunk ; chunk <= lastChunk ; ++chunk)
			{
			TLinAddr addr = Base() + (chunk << KChunkShift);
			TRACE2(("  checking chunk %d at %08x", chunk, addr));
			TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr);
			__NK_ASSERT_DEBUG(!Mmu::PdeMapsSection(*pPde));
			}
		}
#endif
	return DCoarseMapping::PageIn(aPages, aPinArgs, aMapInstanceCount);
	}


TBool DLargeMapping::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	// this should never be called as it's only used by RAM defrag
	__NK_ASSERT_DEBUG(EFalse);
	return EFalse;
	}


TPte* DLargeMapping::FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	// this should never be called as it's only used by RAM defrag
	__NK_ASSERT_DEBUG(EFalse);
	return NULL;
	}