kernel/eka/memmodel/epoc/flexible/mmu/mptalloc.cpp
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "mm.h"
#include "mmu.h"
#include "mpager.h"

#include "mmanager.h"
#include "mmapping.h"
#include "mobject.h"

#include "mptalloc.h"
#include "cache_maintenance.inl"
       
/**
@class PageTableAllocator
@details

NOTES

Page tables are mapped into a sparse array in the virtual address range
#KPageTableBase..#KPageTableEnd. For each present page table there is a
corresponding #SPageTableInfo object mapped from #KPageTableInfoBase upwards.

Page tables for demand paged content are kept separate from other page tables;
this enables their memory to be freed when the page tables no longer map any
memory, i.e. when it has all been paged out. Pages containing these 'paged'
page tables are stored in the demand paging live list, so they participate in
the page aging process.

The 'unpaged' page tables are allocated from the bottom of the array upwards,
via TPtPageAllocator::iLowerAllocator; the 'paged' page tables are allocated
from the top of the array downwards, via TPtPageAllocator::iUpperAllocator.
These two regions are prevented from overlapping, or from coming close enough
together that the #SPageTableInfo structs for paged and unpaged page tables
lie in the same page. This means that the SPageTableInfo memory for paged page
tables can be discarded when its page tables are discarded.

Memory for page tables and page table info objects is managed by
#ThePageTableMemoryManager. When allocating memory for demand paged use, this
uses memory from #ThePager, which will reclaim paged memory if necessary.
Provided the live list always has at least #DPager::iMinYoungPages, this
guarantees that handling page faults can never fail by running out of memory.

TODO: In really pathological situations page table allocation can fail due to
running out of virtual address space to map the table; this needs to be
prevented from happening when handling demand paging faults.
*/
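
#if 0 // Illustrative sketch only; not part of the build.
// A minimal sketch of the page table <-> page table info mapping described
// above: page tables form a sparse array from KPageTableBase, and their
// SPageTableInfo objects form a parallel array from KPageTableInfoBase, one
// entry per page table, in the same order. The helper names SketchPtIndex
// and SketchPtiForPt are hypothetical; the real conversion is provided by
// SPageTableInfo::FromPtPtr.
static TUint SketchPtIndex(TPte* aPt)
	{
	// each page table occupies KPageTableSize bytes of the sparse array...
	return ((TLinAddr)aPt-KPageTableBase)/KPageTableSize;
	}

static SPageTableInfo* SketchPtiForPt(TPte* aPt)
	{
	return (SPageTableInfo*)KPageTableInfoBase+SketchPtIndex(aPt);
	}
#endif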
       

PageTableAllocator PageTables;



TBool PageTablesLockIsHeld()
	{
	return ::PageTables.LockIsHeld();
	}


/**
Minimum number of page tables to keep in reserve.
*/
const TUint KNumReservedPageTables = 0; // none needed - page tables for mapping page tables and infos are permanently allocated

       
/**
Manager for the memory object used to store all the MMU page tables.
*/
class DPageTableMemoryManager : public DMemoryManager
	{
public:
	/**
	Not implemented - page table memory is never destroyed.
	*/
	virtual void Destruct(DMemoryObject* aMemory)
		{}

	virtual TInt StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo)
		{ return PageTables.StealPage(aPageInfo); }

	/**
	Does nothing, returns KErrNone.
	The RAM containing page tables does not need access restrictions applied for demand paging
	purposes. Page table life-time is implicitly managed through the pages it maps.
	*/
	virtual TInt RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
		{ return KErrNone; }

	/**
	Does nothing, returns KErrNone.
	The contents of page tables never need saving as their contents are dynamically generated.
	*/
	virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry)
		{ return KErrNone; }

	/**
	Not implemented, returns KErrNotSupported.
	*/
	virtual TInt Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
		{ return KErrNotSupported; }

	/**
	Not implemented.
	*/
	virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
		{ }


	virtual TInt MovePage(	DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
							TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest);
public:
	/**
	Allocate a page of RAM for storing page tables in.

	@param aMemory		A memory object associated with this manager.
	@param aIndex		Page index, within the memory, to allocate the page at.
	@param aDemandPaged	True if the memory is to be used for page tables mapping
						demand paged content.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Alloc(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged);

       
	/**
	Free a page of RAM being used for storing page tables.

	@param aMemory		A memory object associated with this manager.
	@param aIndex		Page index, within the memory, to free the page from.
	@param aDemandPaged	True if the memory is being used for page tables mapping
						demand paged content.

	@return The number of pages freed (0 or 1), or KErrNoMemory if the page
			array entry could not be found.
	*/
	TInt Free(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged);
	};
       

/**
The single instance of the #DPageTableMemoryManager class.
*/
DPageTableMemoryManager ThePageTableMemoryManager;


TInt DPageTableMemoryManager::Alloc(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged)
	{
	TRACE2(("DPageTableMemoryManager::Alloc(0x%08x,0x%x,%d)",aMemory, aIndex, aDemandPaged));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// allocate page array entry...
	RPageArray::TIter pageList;
	TPhysAddr* p = aMemory->iPages.AddPageStart(aIndex,pageList);
	if(!p)
		return KErrNoMemory;

	// allocate RAM...
	RamAllocLock::Lock();
	TPhysAddr pagePhys;
	TInt r;
	if(aDemandPaged)
		{
		r = ThePager.PageInAllocPages(&pagePhys,1,aMemory->RamAllocFlags());
		__NK_ASSERT_DEBUG(r!=KErrNoMemory);
		}
       
	else
		{// Allocate a fixed page as page tables aren't movable.
		r = TheMmu.AllocRam(&pagePhys, 1, aMemory->RamAllocFlags(), EPageFixed);
		}
       
	RamAllocLock::Unlock();

	TUint usedNew = 0;
	if(r==KErrNone)
		{
		// add RAM to page array...
		MmuLock::Lock();
		if(aDemandPaged)
			ThePager.Event(DPager::EEventPagePageTableAlloc,SPageInfo::FromPhysAddr(pagePhys));
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
		pi->SetManaged(aMemory,aIndex,aMemory->PageInfoFlags());
		RPageArray::AddPage(p,pagePhys);
		MmuLock::Unlock();
		usedNew = 1;

		// map page...
		r = aMemory->MapPages(pageList);
		}

	// release page array entry...
	aMemory->iPages.AddPageEnd(aIndex,usedNew);

	// revert if error...
	if(r!=KErrNone)
		Free(aMemory,aIndex,aDemandPaged);

	return r;
	}
       

TInt DPageTableMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged)
	{
	TRACE2(("DPageTableMemoryManager::Free(0x%08x,0x%x,%d)",aMemory, aIndex, aDemandPaged));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// find page array entry...
	RPageArray::TIter pageList;
	TPhysAddr* p = aMemory->iPages.RemovePageStart(aIndex,pageList);
	if(!p)
		return KErrNoMemory;

	// unmap page...
	aMemory->UnmapPages(pageList,true);

	RamAllocLock::Lock();

	// remove page...
	MmuLock::Lock();
	TPhysAddr pagePhys = RPageArray::RemovePage(p);
	MmuLock::Unlock();

	TInt r;
	if(pagePhys==KPhysAddrInvalid)
		{
		// no page removed...
		r = 0;
		}
	else
		{
		// free the removed page...
		if(aDemandPaged)
			ThePager.PageInFreePages(&pagePhys,1);
		else
			TheMmu.FreeRam(&pagePhys, 1, EPageFixed);
		r = 1;
		}

	RamAllocLock::Unlock();

	// cleanup...
	aMemory->iPages.RemovePageEnd(aIndex,r);
	return r;
	}
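
#if 0 // Illustrative sketch only; not part of the build.
// A minimal sketch of the two-phase RPageArray protocol that Alloc and Free
// above both follow: a Start call claims the page array entry, the entry is
// populated (or emptied) under MmuLock, and an End call publishes the result.
// This function and its parameters are hypothetical.
void SketchTwoPhaseAdd(DMemoryObject* aMemory, TUint aIndex, TPhysAddr aPagePhys)
	{
	RPageArray::TIter pageList;
	TPhysAddr* p = aMemory->iPages.AddPageStart(aIndex,pageList);
	if(!p)
		return; // out of memory
	MmuLock::Lock();
	RPageArray::AddPage(p,aPagePhys);
	MmuLock::Unlock();
	aMemory->iPages.AddPageEnd(aIndex,1); // 1 == one new page was added
	}
#endif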
       

TInt DPageTableMemoryManager::MovePage(	DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
										TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
	{
	// This could be a demand paged page table info which can be discarded
	// but let the PageTableAllocator handle that.
	return ::PageTables.MovePage(aMemory, aOldPageInfo, aBlockZoneId, aBlockRest);
	}


//
// PageTableAllocator
//

       
void PageTableAllocator::Init2(DMutex* aLock)
	{
	TRACEB(("PageTableAllocator::Init2(0x%x)",aLock));
	iLock = aLock;

	__NK_ASSERT_DEBUG(iUnpagedAllocator.CheckFreeList());

	// scan for already allocated page tables
	// (assumes the first page table is used to map page tables)...
	SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase;
	TUint pages = 0;
	for(;;)
		{
		TPte pte = ((TPte*)KPageTableBase)[pages];
		if(pte==KPteUnallocatedEntry)
			break; // end (assumes no gaps in page table allocation)

		// process free page tables in this page...
		TUint freeCount = 0;
		do
			{
			if(pti->IsUnused())
				{
				pti->PtClusterAlloc();
				iUnpagedAllocator.iFreeList.Add(&pti->FreeLink());
				++freeCount;
				}
#ifdef _DEBUG
			else
				__NK_ASSERT_DEBUG(pti->IsPtClusterAllocated());
#endif
			}
		while(!(++pti)->IsFirstInPage());
		iUnpagedAllocator.iFreeCount += freeCount;
		__NK_ASSERT_DEBUG(iUnpagedAllocator.CheckFreeList());
		TRACE2(("PT page 0x%08x has %d free tables",pti[-KPtClusterSize].PageTable(),freeCount));

		// count page, and move on to next one...
		++pages;
		__NK_ASSERT_DEBUG(pages<KChunkSize/KPageSize); // we've assumed less than one page table of page tables
		}

	// construct allocator for page table pages...
	iPtPageAllocator.Init2(pages);

	// initialise allocator page table infos...
	iPageTableGroupCounts[0] = pages;
	__NK_ASSERT_DEBUG(pages/KPageTableGroupSize==0); // we've assumed less than 1 page of page table infos

	// FOLLOWING CODE WILL USE THIS OBJECT TO ALLOCATE SOME PAGE TABLES,
	// SO ALLOCATOR MUST BE INITIALISED TO A FIT STATE BEFORE THIS POINT!

	// construct memory object for page tables...
	TMappingCreateFlags mapFlags = (TMappingCreateFlags)(EMappingCreateFixedVirtual|EMappingCreateReserveAllResources);
#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
	TMemoryAttributes memAttr = EMemoryAttributeStandard;
#else
	TMemoryAttributes memAttr = (TMemoryAttributes)(EMemoryAttributeNormalUncached|EMemoryAttributeDefaultShareable);
#endif
	TMemoryCreateFlags createFlags = (TMemoryCreateFlags)(EMemoryCreateNoWipe|EMemoryCreateCustomManager);
	TInt r = MM::InitFixedKernelMemory(iPageTableMemory, KPageTableBase, KPageTableEnd, pages<<KPageShift, (TMemoryObjectType)(T_UintPtr)&ThePageTableMemoryManager, createFlags, memAttr, mapFlags);
	__NK_ASSERT_ALWAYS(r==KErrNone);
	MM::MemorySetLock(iPageTableMemory,aLock);

	// construct memory object for page table infos...
	memAttr = EMemoryAttributeStandard;
	TUint size = pages*KPtClusterSize*sizeof(SPageTableInfo);
	size = (size+KPageMask)&~KPageMask;
	r = MM::InitFixedKernelMemory(iPageTableInfoMemory, KPageTableInfoBase, KPageTableInfoEnd, size, (TMemoryObjectType)(T_UintPtr)&ThePageTableMemoryManager, createFlags, memAttr, mapFlags);
	__NK_ASSERT_ALWAYS(r==KErrNone);
	MM::MemorySetLock(iPageTableInfoMemory,aLock);

	// make sure we have enough reserve page tables...
	Lock();
	iUnpagedAllocator.Init2(this,KNumReservedPageTables,false);
	iPagedAllocator.Init2(this,0,true);
	Unlock();

	TRACEB(("PageTableAllocator::Init2 done"));
	}
       

void PageTableAllocator::Init2B()
	{
	TRACEB(("PageTableAllocator::Init2B()"));
	TInt r = iPageTableMemory->iPages.PreallocateMemory();
	__NK_ASSERT_ALWAYS(r==KErrNone);
	r = iPageTableInfoMemory->iPages.PreallocateMemory();
	__NK_ASSERT_ALWAYS(r==KErrNone);
	TRACEB(("PageTableAllocator::Init2B done"));
	}
       

void PageTableAllocator::TSubAllocator::Init2(PageTableAllocator* aAllocator, TUint aReserveCount, TBool aDemandPaged)
	{
	iReserveCount = aReserveCount;
	iDemandPaged = aDemandPaged;
	while(iFreeCount<aReserveCount)
		if(!aAllocator->AllocReserve(*this))
			{
			__NK_ASSERT_ALWAYS(0);
			}
	}
       

void PageTableAllocator::TPtPageAllocator::Init2(TUint aNumInitPages)
	{
	iLowerAllocator = TBitMapAllocator::New(KMaxPageTablePages,ETrue);
	__NK_ASSERT_ALWAYS(iLowerAllocator);
	iLowerAllocator->Alloc(0,aNumInitPages);
	iLowerWaterMark = aNumInitPages-1;

	iUpperAllocator = TBitMapAllocator::New(KMaxPageTablePages,ETrue);
	__NK_ASSERT_ALWAYS(iUpperAllocator);
	iUpperWaterMark = KMaxPageTablePages;
	}
       

TInt PageTableAllocator::TPtPageAllocator::Alloc(TBool aDemandPaged)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	TUint pageIndex;
	if(aDemandPaged)
		{
		TInt bit = iUpperAllocator->Alloc();
		if(bit<0)
			return bit;
		pageIndex = KMaxPageTablePages-1-bit;
		if(pageIndex<iUpperWaterMark)
			{
			// new upper watermark...
			if((pageIndex&~(KPageTableGroupSize-1))<=iLowerWaterMark)
				{
				// clashes with other bitmap allocator, so fail...
				iUpperAllocator->Free(bit);
				return -1;
				}
			iUpperWaterMark = pageIndex;
			TRACE(("TPtPageAllocator::Alloc new iUpperWaterMark=%d",pageIndex));
			}
		}
	else
		{
		TInt bit = iLowerAllocator->Alloc();
		if(bit<0)
			return bit;
		pageIndex = bit;
		if(pageIndex>iLowerWaterMark)
			{
			// new lower watermark...
			if(pageIndex>=(iUpperWaterMark&~(KPageTableGroupSize-1)))
				{
				// clashes with other bitmap allocator, so fail...
				iLowerAllocator->Free(bit);
				return -1;
				}
			iLowerWaterMark = pageIndex;
			TRACE(("TPtPageAllocator::Alloc new iLowerWaterMark=%d",pageIndex));
			}
		}
	return pageIndex;
	}
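
#if 0 // Illustrative sketch only; not part of the build.
// A worked example of the clash test above. A group of KPageTableGroupSize
// page table pages shares one page of SPageTableInfo structs, so the upper
// (paged) and lower (unpaged) allocators must never own pages in the same
// group. Assuming, purely for illustration, KPageTableGroupSize==8: an upper
// candidate of page 17 rounds down to group base 16, so a lower watermark of
// 16 clashes (both allocators would share group 2) while 15 does not.
static TBool SketchUpperClashesWithLower(TUint aUpperCandidate, TUint aLowerWaterMark)
	{
	return (aUpperCandidate&~(KPageTableGroupSize-1))<=aLowerWaterMark;
	}
#endif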
       

void PageTableAllocator::TPtPageAllocator::Free(TUint aPageIndex, TBool aDemandPaged)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	if(aDemandPaged)
		iUpperAllocator->Free(KMaxPageTablePages-1-aPageIndex);
	else
		iLowerAllocator->Free(aPageIndex);
	}
       

void PageTableAllocator::Lock()
	{
	Kern::MutexWait(*iLock);
	}


void PageTableAllocator::Unlock()
	{
	Kern::MutexSignal(*iLock);
	}


TBool PageTableAllocator::LockIsHeld()
	{
	return iLock->iCleanup.iThread == &Kern::CurrentThread();
	}
       

TBool PageTableAllocator::AllocReserve(TSubAllocator& aSubAllocator)
	{
	__NK_ASSERT_DEBUG(LockIsHeld());

	// allocate page...
	TInt ptPageIndex = iPtPageAllocator.Alloc(aSubAllocator.iDemandPaged);
	if(ptPageIndex<0)
		return false;

	// commit memory for page...
	__NK_ASSERT_DEBUG(iPageTableMemory); // check we've initialised iPageTableMemory
	TInt r = ThePageTableMemoryManager.Alloc(iPageTableMemory,ptPageIndex,aSubAllocator.iDemandPaged);
	if(r==KErrNoMemory)
		{
		iPtPageAllocator.Free(ptPageIndex,aSubAllocator.iDemandPaged);
		return false;
		}
	__NK_ASSERT_DEBUG(r==KErrNone);

	// allocate page table info...
	TUint ptgIndex = ptPageIndex/KPageTableGroupSize;
	if(!iPageTableGroupCounts[ptgIndex])
		{
		__NK_ASSERT_DEBUG(iPageTableInfoMemory); // check we've initialised iPageTableInfoMemory
		r = ThePageTableMemoryManager.Alloc(iPageTableInfoMemory,ptgIndex,aSubAllocator.iDemandPaged);

		if(r==KErrNoMemory)
			{
			r = ThePageTableMemoryManager.Free(iPageTableMemory,ptPageIndex,aSubAllocator.iDemandPaged);
			__NK_ASSERT_DEBUG(r==1);
			iPtPageAllocator.Free(ptPageIndex,aSubAllocator.iDemandPaged);
			return false;
			}
		__NK_ASSERT_DEBUG(r==KErrNone);
		// For paged page tables set all the page table infos in this page as unused
		// and their page table clusters as not allocated.
		if (aSubAllocator.iDemandPaged)
			{
			SPageTableInfo* ptiBase = (SPageTableInfo*)KPageTableInfoBase + (ptgIndex*KPageTableInfosPerPage);
			memclr(ptiBase, KPageSize);
			}
		}
	++iPageTableGroupCounts[ptgIndex];

	SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase+ptPageIndex*KPtClusterSize;
	aSubAllocator.AllocPage(pti);
	return true;
	}
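
#if 0 // Illustrative sketch only; not part of the build.
// A minimal sketch of the group count bookkeeping used by AllocReserve above
// (and unwound by FreeReserve below): each page of SPageTableInfo structs
// covers KPageTableGroupSize page table pages, so a per-group reference count
// decides when the info page itself must be committed or released. The
// function and the aGroupCounts parameter are hypothetical.
void SketchGroupAccounting(TUint aPtPageIndex, TUint* aGroupCounts)
	{
	TUint ptgIndex = aPtPageIndex/KPageTableGroupSize;
	if(!aGroupCounts[ptgIndex]++)
		{
		// first page table page in this group, so the page holding its
		// SPageTableInfo structs would be committed here...
		}
	}
#endif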
       

void PageTableAllocator::TSubAllocator::AllocPage(SPageTableInfo* aPageTableInfo)
	{
	SPageTableInfo* pti = aPageTableInfo;
	__NK_ASSERT_DEBUG(pti->IsFirstInPage());

	TRACE2(("Alloc PT page (%d) 0x%08x",iDemandPaged,pti->PageTable()));

	// initialise page table infos...
	do pti->New(iDemandPaged);
	while(!(++pti)->IsFirstInPage());
	pti -= KPtClusterSize;

	// all page tables initially unused, so start them off on iCleanupList...
	pti->AddToCleanupList(iCleanupList);
	iFreeCount += KPtClusterSize;
	__NK_ASSERT_DEBUG(CheckFreeList());
	}
       

SPageTableInfo* PageTableAllocator::TSubAllocator::FreePage()
	{
	if(!IsCleanupRequired())
		return 0;

	// get a completely free page...
	SDblQueLink* link = iCleanupList.Last();
	__NK_ASSERT_DEBUG(link);
	SPageTableInfo* pti = SPageTableInfo::FromFreeLink(link);
	__NK_ASSERT_DEBUG(pti->IsFirstInPage());
	pti->RemoveFromCleanupList();
	iFreeCount -= KPtClusterSize;
	__NK_ASSERT_DEBUG(CheckFreeList());

	TRACE2(("Free PT page (%d) 0x%08x",iDemandPaged,pti->PageTable()));

	// Mark each page table info as no longer having its page table cluster allocated.
	do
		{// make sure all page tables in page are unused...
		__NK_ASSERT_DEBUG(pti->IsUnused());
		pti->PtClusterFreed();
		}
	while(!(++pti)->IsFirstInPage());
	pti -= KPtClusterSize;

	return pti;
	}
       

TBool PageTableAllocator::FreeReserve(TSubAllocator& aSubAllocator)
	{
	__NK_ASSERT_DEBUG(LockIsHeld());

	// get a page which needs freeing...
	SPageTableInfo* pti = aSubAllocator.FreePage();
	if(!pti)
		return false;

	// free the page...
	TUint ptPageIndex = ((TLinAddr)pti-KPageTableInfoBase)>>(KPageTableInfoShift+KPtClusterShift);
	iPtPageAllocator.Free(ptPageIndex,aSubAllocator.iDemandPaged);
	TInt r = ThePageTableMemoryManager.Free(iPageTableMemory,ptPageIndex,aSubAllocator.iDemandPaged);
	(void)r;
	__NK_ASSERT_DEBUG(r==1);

	// free page table info...
	TUint ptgIndex = ptPageIndex/KPageTableGroupSize;
	TUint groupCount = iPageTableGroupCounts[ptgIndex]; // compiler handles half-word values stupidly, so give it a hand
	--groupCount;
	iPageTableGroupCounts[ptgIndex] = groupCount;
	if(!groupCount)
		r = ThePageTableMemoryManager.Free(iPageTableInfoMemory,ptgIndex,aSubAllocator.iDemandPaged);

	return true;
	}
       

TPte* PageTableAllocator::Alloc(TBool aDemandPaged)
	{
	TRACE(("PageTableAllocator::Alloc(%d)",(bool)aDemandPaged));
	TPte* pt = DoAlloc(aDemandPaged);
	TRACE(("PageTableAllocator::Alloc() returns 0x%08x phys=0x%08x",pt,pt?Mmu::PageTablePhysAddr(pt):KPhysAddrInvalid));
	return pt;
	}


TPte* PageTableAllocator::DoAlloc(TBool aDemandPaged)
	{
	__NK_ASSERT_DEBUG(LockIsHeld());

#ifdef _DEBUG
	// simulated OOM, but not if demand paged as this can't fail under normal circumstances...
	if(!aDemandPaged)
		{
		RamAllocLock::Lock();
		TBool fail = K::CheckForSimulatedAllocFail();
		RamAllocLock::Unlock();
		if(fail)
			return 0;
		}
#endif

	TSubAllocator& allocator = aDemandPaged ? iPagedAllocator : iUnpagedAllocator;

	__NK_ASSERT_DEBUG(!iAllocating || !aDemandPaged); // can't recursively allocate demand paged tables

	__NK_ASSERT_DEBUG(iAllocating<=allocator.iReserveCount); // can't recursively allocate more than the reserve

	// keep up enough spare page tables...
	if(!iAllocating++) // if we haven't gone recursive...
		{
		// make sure we have a page table to allocate...
		while(allocator.iFreeCount<=allocator.iReserveCount)
			if(!AllocReserve(allocator))
				{
				--iAllocating;
				return 0; // out of memory
				}
		}
	else
		{
		TRACE(("PageTableAllocator::DoAlloc recurse=%d",iAllocating));
		}

	// allocate a page table...
	SPageTableInfo* pti = allocator.Alloc();

	// initialise page table info...
	pti->Init();

	// initialise page table...
	TPte* pt = pti->PageTable();
	memclr(pt,KPageTableSize);
	CacheMaintenance::MultiplePtesUpdated((TLinAddr)pt,KPageTableSize);

	// done...
	--iAllocating;
	return pt;
	}
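
#if 0 // Illustrative sketch only; not part of the build.
// A minimal sketch of how a client might drive the allocator through the
// Lock()/Alloc()/Free()/Unlock() interface above. The caller shown here is
// hypothetical; the real clients are the coarse and fine memory mapping code.
void SketchAllocAndFreePageTable()
	{
	::PageTables.Lock();
	TPte* pt = ::PageTables.Alloc(false); // false == not demand paged
	if(pt)
		{
		// ... install pt in a page directory entry, populate its PTEs,
		// and later tear the mapping down again ...
		::PageTables.Free(pt);
		}
	::PageTables.Unlock();
	}
#endif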
       

SPageTableInfo* PageTableAllocator::TSubAllocator::Alloc()
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(iFreeCount);
	__NK_ASSERT_DEBUG(CheckFreeList());

	// get next free page table...
	SDblQueLink* link = iFreeList.GetFirst();
	SPageTableInfo* pti;
	if(link)
		pti = SPageTableInfo::FromFreeLink(link);
	else
		{
		// need to get one back from the cleanup list...
		link = iCleanupList.First();
		__NK_ASSERT_DEBUG(link); // we can't be out of page tables
		pti = SPageTableInfo::FromFreeLink(link);
		__NK_ASSERT_DEBUG(pti->IsFirstInPage());
		pti->RemoveFromCleanupList();

		// add other page tables in the page to the free list...
		SPageTableInfo* free = pti+1;
		while(!free->IsFirstInPage())
			{
			__NK_ASSERT_DEBUG(free->IsUnused());
			iFreeList.Add(&free->FreeLink());
			++free;
			}
		}

	// count page as allocated...
	--iFreeCount;
	__NK_ASSERT_DEBUG(pti->IsUnused());
	__NK_ASSERT_DEBUG(CheckFreeList());

	return pti;
	}
       

void PageTableAllocator::Free(TPte* aPageTable)
	{
	TRACE(("PageTableAllocator::Free(0x%08x)",aPageTable));
	DoFree(aPageTable);
	}


void PageTableAllocator::DoFree(TPte* aPageTable)
	{
	__NK_ASSERT_DEBUG(LockIsHeld());

	// make sure page table isn't being aliased...
	TPhysAddr pagePhys = Mmu::PageTablePhysAddr(aPageTable);
	__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
	TheMmu.RemoveAliasesForPageTable(pagePhys);

	// free page table...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
	TSubAllocator& allocator = pti->IsDemandPaged() ? iPagedAllocator : iUnpagedAllocator;
	allocator.Free(pti);

	// check for surplus pages...
	if(allocator.IsCleanupRequired())
		{
		iCleanup.Add(CleanupTrampoline,this);
		}
	}
       

void PageTableAllocator::TSubAllocator::Free(SPageTableInfo* aPageTableInfo)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(CheckFreeList());

	SPageTableInfo* pti = aPageTableInfo;

	// clear the page table info...
	MmuLock::Lock();
	__NK_ASSERT_DEBUG(!pti->PermanenceCount());
	pti->SetUnused();
	MmuLock::Unlock();

	// scan other page tables in same page...
	SPageTableInfo* first = pti->FirstInPage();
	SPageTableInfo* last = pti->LastInPage();
	SPageTableInfo* prev;
	SPageTableInfo* next;

	// try insert page table after previous free page table in same page...
	prev = pti;
	while(prev>first)
		{
		--prev;
		if(prev->IsUnused())
			{
			pti->FreeLink().InsertAfter(&prev->FreeLink());
			goto inserted;
			}
		}

	// try insert page table before next free page table in same page...
	next = pti;
	while(next<last)
		{
		++next;
		if(next->IsUnused())
			{
			pti->FreeLink().InsertBefore(&next->FreeLink());
			goto inserted;
			}
		}

	// only free page table in page, so link into start of free list...
	pti->FreeLink().InsertAfter(&iFreeList.iA);

inserted:
	++iFreeCount;
	__NK_ASSERT_DEBUG(CheckFreeList());

	// see if all page tables in page are empty...
	pti = first;
	do
		{
		if(!pti->IsUnused())
			return; // some page tables still in use, so end
		}
	while(!(++pti)->IsFirstInPage());
	pti -= KPtClusterSize;

	// check if page with page table in is pinned...
	MmuLock::Lock();
	TPte* pt = pti->PageTable();
	TPhysAddr pagePhys = Mmu::PageTablePhysAddr(pt);
	SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
	TBool pinned = pi->PagedState()==SPageInfo::EPagedPinned;
	MmuLock::Unlock();
	// Note, the pinned state can't change even though we've now released the MmuLock.
	// This is because all page tables in the page are unused and we don't pin unused
	// page tables. Also, the page table(s) can't become used again whilst this function
	// executes as we hold the page table allocator lock.
	if(pinned)
		{
		// return now and leave page table(s) in free list if their page is pinned...
		// Note, when page is unpinned it will end up in the paging live list and
		// eventually be reclaimed for other use (if the page tables in the page
		// don't get reallocated before then).
		__NK_ASSERT_DEBUG(pti->IsDemandPaged()); // only paged page tables should have been pinned
		return;
		}

	// the page with our page table in it is no longer in use...
	MoveToCleanup(pti);
	}
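
#if 0 // Illustrative sketch only; not part of the build.
// A minimal sketch of the free list invariant that the insertion logic in
// TSubAllocator::Free above maintains: unused page tables belonging to the
// same page are kept adjacent on the free list, in address order. This is
// what CheckFreeList verifies in debug builds, and it is what lets
// MoveToCleanup unlink a whole page of page tables with a single splice.
// The helper name is hypothetical.
TBool SketchAdjacentOnFreeList(SPageTableInfo* aPti, SPageTableInfo* aNext)
	{
	// aPti and aNext are unused page tables in the same page with no other
	// unused page table between them; their free links must be neighbours...
	return aPti->FreeLink().iNext==&aNext->FreeLink()
		&& aNext->FreeLink().iPrev==&aPti->FreeLink();
	}
#endif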
       

void PageTableAllocator::TSubAllocator::MoveToCleanup(SPageTableInfo* aPageTableInfo)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(CheckFreeList());

	SPageTableInfo* pti = aPageTableInfo;
	__NK_ASSERT_DEBUG(pti->IsFirstInPage());

	TRACE2(("Cleanup PT page (%d) 0x%08x",iDemandPaged,pti->PageTable()));

	// make sure all page tables in page are unused...
#ifdef _DEBUG
	do __NK_ASSERT_DEBUG(pti->IsUnused());
	while(!(++pti)->IsFirstInPage());
	pti -= KPtClusterSize;
#endif

	// unlink all page tables in page...
	SDblQueLink* prev = pti->FreeLink().iPrev;
	SDblQueLink* next = pti->LastInPage()->FreeLink().iNext;
	prev->iNext = next;
	next->iPrev = prev;

	// add page tables to cleanup list...
	__NK_ASSERT_DEBUG(!pti->IsOnCleanupList());
	pti->AddToCleanupList(iCleanupList);
	__NK_ASSERT_DEBUG(CheckFreeList());
	}


TBool PageTableAllocator::TSubAllocator::IsCleanupRequired()
	{
	return iFreeCount>=iReserveCount+KPtClusterSize && !iCleanupList.IsEmpty();
	}
       

#ifdef _DEBUG

TBool PageTableAllocator::TSubAllocator::CheckFreeList()
	{
	TUint count = iFreeCount;

	// count page tables in iCleanupList...
	SDblQueLink* head = &iCleanupList.iA;
	SDblQueLink* link = head;
	for(;;)
		{
		link = link->iNext;
		if(link==head)
			break;
		SPageTableInfo* pti = SPageTableInfo::FromFreeLink(link);
		__NK_ASSERT_DEBUG(pti->IsFirstInPage());
		__NK_ASSERT_DEBUG(pti->IsOnCleanupList());
		if(count<(TUint)KPtClusterSize)
			return false;
		count -= KPtClusterSize;
		}

	// count page tables in iFreeList...
	head = &iFreeList.iA;
	link = head;
	while(count)
		{
		link = link->iNext;
		if(link==head)
			return false;

		// check next free page table in page is linked in correct order...
		SPageTableInfo* pti = SPageTableInfo::FromFreeLink(link);
		SPageTableInfo* last = pti->LastInPage();
		SPageTableInfo* next = pti;
		while(next<last)
			{
			++next;
			if(next->IsUnused())
				{
				__NK_ASSERT_DEBUG(pti->FreeLink().iNext==&next->FreeLink());
				__NK_ASSERT_DEBUG(next->FreeLink().iPrev==&pti->FreeLink());
				break;
				}
			}

		--count;
		}

	return link->iNext==head;
	}

#endif
       


//
// Paged page table handling
//

TInt SPageTableInfo::ForcedFree()
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsDemandPaged());

	TUint type = iType;

	if(type==EUnused)
		return KErrNone;

	__NK_ASSERT_DEBUG(iPermanenceCount==0);

	// clear all PTEs in page table...
	TPte* pt = PageTable();
	memclr(pt,KPageTableSize);
	__e32_memory_barrier(); // make sure all CPUs read zeros from pt so forcing a page-in (rather than a rejuvenate) if accessed
	iPageCount = 0;

	if(type==ECoarseMapping)
		{
		TRACE2(("SPageTableInfo::ForcedFree() coarse 0x%08x 0x%x %d",iCoarse.iMemoryObject,iCoarse.iChunkIndex,iCoarse.iPteType));
		// mustn't release MmuLock between clearing page table and calling this
		// (otherwise page table may get updated before it's actually removed from
		// the memory object)...
		iCoarse.iMemoryObject->StealPageTable(iCoarse.iChunkIndex,iCoarse.iPteType);
		}
	else if(type==EFineMapping)
		{
		// need to remove page table from the address space's page directory...
		TLinAddr addr = iFine.iLinAddrAndOsAsid;
		TUint osAsid = addr&KPageMask;
		TPde* pPde = Mmu::PageDirectoryEntry(osAsid,addr);

		TRACE2(("SPageTableInfo::ForcedFree() fine %d 0x%08x",osAsid,addr&~KPageMask));

		TPde pde = KPdeUnallocatedEntry;
		TRACE2(("!PDE %x=%x",pPde,pde));
		*pPde = pde;
		SinglePdeUpdated(pPde);
		}
	else
		{
		// invalid type...
		__NK_ASSERT_DEBUG(0);
		return KErrNotSupported;
		}

	MmuLock::Unlock();

	// make sure page table updates visible to MMU...
	CacheMaintenance::MultiplePtesUpdated((TLinAddr)pt,KPageTableSize);
	InvalidateTLB();

	// free the page table back to the allocator,
	// this will also remove any IPC alias using it...
	__NK_ASSERT_DEBUG(iPageCount==0); // should still be unused
	::PageTables.Free(pt);

	MmuLock::Lock();

	return KErrNone;
	}
       

TInt PageTableAllocator::StealPage(SPageInfo* aPageInfo)
	{
	TRACE2(("PageTableAllocator::StealPage(0x%08x)",aPageInfo));
	__NK_ASSERT_DEBUG(LockIsHeld()); // only works if PageTableAllocator lock is the RamAllocLock
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	if (aPageInfo->Owner() == iPageTableInfoMemory)
		return StealPageTableInfo(aPageInfo);

	__UNLOCK_GUARD_START(MmuLock);

	// This must be a page table page so steal it.
	__NK_ASSERT_ALWAYS(aPageInfo->Owner()==iPageTableMemory);
	TUint ptPageIndex = aPageInfo->Index();
	SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase+ptPageIndex*KPtClusterSize;

	aPageInfo->SetModifier(&pti);
	__UNLOCK_GUARD_END(MmuLock);

	// forcibly free each page table in the page...
	TInt r;
	do
		{// Check for pinning; ForcedFree() releases MmuLock so must check for
		// each page table.
		if (aPageInfo->PagedState() == SPageInfo::EPagedPinned)
			{// The page table page is pinned so can't steal it.
			r = KErrInUse;
			break;
			}
		r = pti->ForcedFree();
		if(r!=KErrNone)
			break;
		if(aPageInfo->CheckModified(&pti))
			{
			r = KErrInUse;
			break;
			}
		}
	while(!(++pti)->IsFirstInPage());
	pti -= KPtClusterSize; // restore pti back to first page table

	if(r==KErrNone)
		{
		MmuLock::Unlock();
		if(!pti->IsOnCleanupList())
			{
			// the page might not already be on the cleanup list in the case where
			// it was previously freed whilst it was pinned.
			// In this case, a later unpinning would have put it back into the paging live
			// list from where it is now subsequently being stolen...
			iPagedAllocator.MoveToCleanup(pti);
			}
		// free the page from allocator so it ends up back in the paging pool as a free page...
		while(FreeReserve(iPagedAllocator))
			{}
		// return an 'error' to indicate page has not been stolen.
		// We have however achieved the main aim of making the page 'free' and
		// it will be available if page stealing attempts to steal the page again...
		r = KErrCompletion;
		MmuLock::Lock();
		}

	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	TRACE2(("PageTableAllocator::StealPage returns %d",r));
	return r;
	}
       

TInt PageTableAllocator::StealPageTableInfo(SPageInfo* aPageInfo)
	{
	// Need to steal every page table for every page table info in this page.
	// This page can't be modified or removed as we hold the lock, however
	// the page table pages being freed may be rejuvenated and therefore their
	// SPageInfos may be marked as modified.
	TInt r = KErrNone;
	TUint ptiOffset = aPageInfo->Index() * KPageTableInfosPerPage;
	SPageTableInfo* ptiBase = (SPageTableInfo*)KPageTableInfoBase + ptiOffset;
	SPageTableInfo* ptiEnd = ptiBase + KPageTableInfosPerPage;
	TUint flash = 0;
	for (SPageTableInfo* pti = ptiBase; pti < ptiEnd;)
		{// Free each page table cluster that is allocated.
		if (pti->IsPtClusterAllocated())
			{
			TPhysAddr ptPhysAddr = Mmu::LinearToPhysical((TLinAddr)pti->PageTable());
			SPageInfo* ptSPageInfo = SPageInfo::FromPhysAddr(ptPhysAddr);
			ptSPageInfo->SetModifier(&flash);
			do
				{
				__NK_ASSERT_DEBUG(pti->IsPtClusterAllocated());
				if (aPageInfo->PagedState() == SPageInfo::EPagedPinned ||
					ptSPageInfo->PagedState() == SPageInfo::EPagedPinned)
					{// The page table or page table info is pinned so can't steal info page.
					r = KErrInUse;
					break;
					}
				r = pti->ForcedFree();
				if(r!=KErrNone)
					break;
				if(ptSPageInfo->CheckModified(&flash))
					{// The page table page has been rejuvenated so can't steal it.
					r = KErrInUse;
					break;
					}
				}
			while (!(++pti)->IsFirstInPage());
			if (r != KErrNone)
				break;
			SPageTableInfo* ptiTmp = pti - KPtClusterSize;
			MmuLock::Unlock();
			if(!ptiTmp->IsOnCleanupList())
				{
				// the page might not already be on the cleanup list in the case where
				// it was previously freed whilst it was pinned.
				// In this case, a later unpinning would have put it back into the paging live
				// list from where it is now subsequently being stolen...
				iPagedAllocator.MoveToCleanup(ptiTmp);
				}
			MmuLock::Lock();
			flash = 0;		// The MmuLock has been flashed at least once.
			}
		else
			{// Move onto the next cluster this page of page table infos refers to.
			__NK_ASSERT_DEBUG(pti->IsFirstInPage());
			pti += KPtClusterSize;
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
			}
		}
	// free the pages discarded from allocator so they end up back in the paging pool as free pages...
	MmuLock::Unlock();
	while(FreeReserve(iPagedAllocator))
		{}
	if (r == KErrNone)
		r = KErrCompletion;	// The pager needs to remove the page from the live list.
	MmuLock::Lock();
	return r;
	}
       

TInt PageTableAllocator::MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
									TUint aBlockZoneId, TBool aBlockRest)
	{
	// We don't move page table or page table info pages; however, if this page
	// is demand paged then we may be able to discard it.
	MmuLock::Lock();
	if (!(iPtPageAllocator.IsDemandPaged(aOldPageInfo)))
		{
		MmuLock::Unlock();
		return KErrNotSupported;
		}
	if (aOldPageInfo->PagedState() == SPageInfo::EPagedPinned)
		{// The page is pinned so don't attempt to discard it as pinned pages
		// can't be discarded.  Also, the pager will invoke this method again.
		MmuLock::Unlock();
		return KErrInUse;
		}
	// Let the pager discard the page as it controls the size of the live list.
	// If the size of the live list allows then eventually
	// PageTableAllocator::StealPage() will be invoked on this page.
	return ThePager.DiscardPage(aOldPageInfo, aBlockZoneId, aBlockRest);
	}
       

void PageTableAllocator::PinPageTable(TPte* aPageTable, TPinArgs& aPinArgs)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(SPageTableInfo::FromPtPtr(aPageTable)->IsDemandPaged());
	__NK_ASSERT_DEBUG(!SPageTableInfo::FromPtPtr(aPageTable)->IsUnused());
	__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable));

	// pin page with page table in...
	TPhysAddr pagePhys = Mmu::PageTablePhysAddr(aPageTable);
	SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
	ThePager.Pin(pi,aPinArgs);

	// pin page with page table info in...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
	pagePhys = Mmu::UncheckedLinearToPhysical((TLinAddr)pti,KKernelOsAsid);
	pi = SPageInfo::FromPhysAddr(pagePhys);
	ThePager.Pin(pi,aPinArgs);
	}


void PageTableAllocator::UnpinPageTable(TPte* aPageTable, TPinArgs& aPinArgs)
	{
	// unpin page with page table info in...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
	TPhysAddr pagePhys = Mmu::UncheckedLinearToPhysical((TLinAddr)pti,KKernelOsAsid);
	SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
	ThePager.Unpin(pi,aPinArgs);

	// unpin page with page table in...
	pagePhys = Mmu::PageTablePhysAddr(aPageTable);
	pi = SPageInfo::FromPhysAddr(pagePhys);
	ThePager.Unpin(pi,aPinArgs);
	}
       

#ifdef _DEBUG
TBool IsPageTableUnpagedRemoveAllowed(SPageInfo* aPageInfo)
	{ return ::PageTables.IsPageTableUnpagedRemoveAllowed(aPageInfo); }

TBool PageTableAllocator::IsPageTableUnpagedRemoveAllowed(SPageInfo* aPageInfo)
	{
	if (aPageInfo->Owner() == iPageTableInfoMemory)
		{// Page table info pages are never added to the live list but can be
		// stolen via DPager::StealPage()
		return ETrue;
		}

	if (aPageInfo->Owner() == iPageTableMemory)
		{// Page table pages are added to the live list but only after the page they
		// map has been paged in. Therefore, a pde can reference a pte before it has
		// been added to the live list, so allow this, but only for uninitialised
		// page table pages.
		TUint ptPageIndex = aPageInfo->Index();
		SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase+ptPageIndex*KPtClusterSize;
		do
			{
			if (!pti->IsUnused())
				{
				TPte* pte = pti->PageTable();
				TPte* pteEnd = pte + (KPageTableSize/sizeof(TPte));
				while (pte < pteEnd)
					if (*pte++ != KPteUnallocatedEntry)
						return EFalse;
				}
			}
		while(!(++pti)->IsFirstInPage());
		return ETrue;
		}
	return EFalse;
	}
#endif
       

//
// Cleanup
//

void PageTableAllocator::CleanupTrampoline(TAny* aSelf)
	{
	((PageTableAllocator*)aSelf)->Cleanup();
	}


void PageTableAllocator::Cleanup()
	{
	// free any surplus pages...
	Lock();
	while(FreeReserve(iPagedAllocator) || FreeReserve(iUnpagedAllocator))
		{}
	Unlock();
	}