kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp
branchRCL_3
changeset 28 5b5d147c7838
parent 26 c734af59ce98
child 36 bbf8bed59bcb
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp	Tue May 11 17:28:22 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp	Tue May 25 14:09:55 2010 +0300
@@ -43,21 +43,34 @@
 */
 const TUint	KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u;
 
-/*
-Limit the maximum number of oldest pages to bound the time taken by SelectPagesToClean(), which is
-called with the MmuLock held.
+/**
+Default limit for the maximum number of oldest pages.
+
+If the data paging device sets iPreferredWriteShift, then this limit is increased if necessary
+to allow that many pages to be present.
+
+This limit exists to make our live list implementation a closer approximation to LRU, and to bound
+the time taken by SelectSequentialPagesToClean(), which is called with the MmuLock held.
 */
-const TUint KMaxOldestPages = 32;
+const TUint KDefaultMaxOldestPages = 32;
 
 static DMutex* ThePageCleaningLock = NULL;
 
 DPager ThePager;
 
 
-DPager::DPager()
-	: iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0),
-	  iYoungCount(0), iOldCount(0), iOldestCleanCount(0),
-	  iNumberOfFreePages(0), iReservePageCount(0), iMinimumPageLimit(0)
+DPager::DPager() :
+	iMinimumPageCount(0),
+	iMaximumPageCount(0),
+	iYoungOldRatio(0),
+	iYoungCount(0),
+	iOldCount(0),
+	iOldestCleanCount(0),
+	iMaxOldestPages(KDefaultMaxOldestPages),
+	iNumberOfFreePages(0),
+	iReservePageCount(0),
+	iMinimumPageLimit(0),
+	iPagesToClean(1)
 #ifdef __DEMAND_PAGING_BENCHMARKS__
 	, iBenchmarkLock(TSpinLock::EOrderGenericIrqHigh3)
 #endif	  
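
A worked example of how this limit is meant to scale (the values here are assumed, not taken
from this change): with a paging device that prefers 16-page writes (iPreferredWriteShift == 4)
and sequential cleaning enabled, the default window of 32 oldest pages is too small to assemble
a colour-sequential run of 16 pages, so the limit grows:

	// Sketch only; see SetPagesToClean()/RequiredOldestPages() later in this
	// change for the real calculation.
	TUint pagesToClean = 1u << 4;               // assumed preferred write size
	TUint maxOldest = MaxU(KDefaultMaxOldestPages,
	                       pagesToClean * 8);   // == 128, up from the default 32
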
@@ -489,6 +502,10 @@
 	{
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	
+	// The PageCleaningLock may or may not be held.  This method will release the RamAllocLock if
+	// it has to wait for the PageCleaningLock.
+	TBool pageCleaningLockAcquired = EFalse;
 
 	// find oldest page in list...
 	SDblQueLink* link;
@@ -500,6 +517,37 @@
 	else if (iOldestDirtyCount)
 		{
 		__NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
+
+		// see if we can clean multiple dirty pages in one go...
+		if (iPagesToClean > 1 && iOldestDirtyCount > 1)
+			{
+			if (!PageCleaningLock::IsHeld())
+				{
+				// temporarily release ram alloc mutex and acquire page cleaning mutex
+				MmuLock::Unlock();
+				RamAllocLock::Unlock();
+				PageCleaningLock::Lock();
+				MmuLock::Lock();
+				pageCleaningLockAcquired = ETrue;
+				}
+
+			// if we've had to wait on the page cleaning mutex there may be clean pages now; if
+			// so, don't bother cleaning, just restart
+			if (iOldestCleanCount == 0 && iOldestDirtyCount >= 1)
+				CleanSomePages(EFalse);
+
+			if (pageCleaningLockAcquired)
+				{
+				// release page cleaning mutex and re-acquire ram alloc mutex
+				MmuLock::Unlock();
+				PageCleaningLock::Unlock();			
+				RamAllocLock::Lock();
+				MmuLock::Lock();
+				}
+			
+			return 1;  // tell caller to restart their operation
+			}
+		
 		link = iOldestDirtyList.Last();
 		}
 	else if (iOldCount)
@@ -515,9 +563,14 @@
 		}
 	SPageInfo* pageInfo = SPageInfo::FromLink(link);
 
-	if (pageInfo->IsDirty() && !PageCleaningLock::IsHeld())
-		return 1;
-
+	if (pageInfo->IsDirty())
+		{		
+		MmuLock::Unlock();
+		PageCleaningLock::Lock();
+		MmuLock::Lock();
+		pageCleaningLockAcquired = ETrue;
+		}
+	
 	// try to steal it from owning object...
 	TInt r = StealPage(pageInfo);	
 	if (r == KErrNone)
@@ -525,73 +578,49 @@
 		BalanceAges();
 		aPageInfoOut = pageInfo;
 		}
+
+	if (pageCleaningLockAcquired)
+		{		
+		MmuLock::Unlock();
+		PageCleaningLock::Unlock();
+		MmuLock::Lock();
+		}
 	
 	return r;
 	}
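
The multi-page path above avoids blocking on the PageCleaningLock while holding the
RamAllocLock: both inner locks are dropped, the mutex is taken, and the caller is told to
restart because the live list may have changed in the meantime. A minimal sketch of the shape,
assuming the ordering RamAllocLock, then PageCleaningLock, then MmuLock:

	// Back-out-and-restart sketch, under the assumed lock ordering:
	MmuLock::Unlock();          // never wait while holding the MmuLock
	RamAllocLock::Unlock();     // don't stall RAM allocation during the wait
	PageCleaningLock::Lock();   // may block while another thread cleans
	MmuLock::Lock();            // live list may have changed under us
	// ... clean some pages, then unwind the locks in reverse ...
	return 1;                   // caller restarts with fresh page counts
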
 
 
-SPageInfo* DPager::StealOldestPage()
-	{
-	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
-	TBool pageCleaningLockHeld = EFalse;
-	for(;;)
-		{
-		SPageInfo* pageInfo = NULL;
-		TInt r = TryStealOldestPage(pageInfo);
-		
-		if (r == KErrNone)
-			{
-			if (pageCleaningLockHeld)
-				{
-				MmuLock::Unlock();
-				PageCleaningLock::Unlock();
-				MmuLock::Lock();
-				}
-			return pageInfo;
-			}
-		else if (r == 1)
-			{
-			__NK_ASSERT_ALWAYS(!pageCleaningLockHeld);
-			MmuLock::Unlock();
-			PageCleaningLock::Lock();
-			MmuLock::Lock();
-			pageCleaningLockHeld = ETrue;
-			}
-		// else retry...
-		}
-	}
-
-#ifdef __CPU_CACHE_HAS_COLOUR
-
-template <class T, TInt maxObjects> class TSequentialColourSelector
+template <class T, TUint maxObjects> class TSequentialColourSelector
 	{
 public:
-	static const TInt KMaxLength = maxObjects;
-	static const TInt KArrayLength = _ALIGN_UP(KMaxLength, KPageColourCount);
+	static const TUint KMaxSearchLength = _ALIGN_UP(maxObjects, KPageColourCount);
 	
-	FORCE_INLINE TSequentialColourSelector()
+	FORCE_INLINE TSequentialColourSelector(TUint aTargetLength)
 		{
 		memclr(this, sizeof(*this));
+		__NK_ASSERT_DEBUG(aTargetLength <= maxObjects);
+		iTargetLength = aTargetLength;
+		iSearchLength = _ALIGN_UP(aTargetLength, KPageColourCount);
 		}
 
 	FORCE_INLINE TBool FoundLongestSequence()
 		{
-		return iLongestLength >= KMaxLength;
+		return iLongestLength >= iTargetLength;
 		}
 
-	FORCE_INLINE void AddCandidate(T* aObject, TInt aColour)
+	FORCE_INLINE void AddCandidate(T* aObject, TUint aColour)
 		{
 		// allocate objects to slots based on colour
-		for (TInt i = aColour ; i < KArrayLength ; i += KPageColourCount)
+		for (TUint i = aColour ; i < iSearchLength ; i += KPageColourCount)
 			{
 			if (!iSlot[i])
 				{
 				iSlot[i] = aObject;
 				iSeqLength[i] = i == 0 ? 1 : iSeqLength[i - 1] + 1;
-				TInt j = i + 1;
-				while(j < KArrayLength && iSeqLength[j])
+				TUint j = i + 1;
+				while(j < iSearchLength && iSeqLength[j])
 					iSeqLength[j++] += iSeqLength[i];
-				TInt currentLength = iSeqLength[j - 1];
+				TUint currentLength = iSeqLength[j - 1];
 				if (currentLength > iLongestLength)
 					{
 					iLongestLength = currentLength;
@@ -602,31 +631,31 @@
 			}
 		}
 
-	FORCE_INLINE TInt FindLongestRun(T** aObjectsOut)
+	FORCE_INLINE TUint FindLongestRun(T** aObjectsOut)
 		{
 		if (iLongestLength == 0)
 			return 0;
 
-		if (iLongestLength < KMaxLength && iSlot[0] && iSlot[KArrayLength - 1])
+		if (iLongestLength < iTargetLength && iSlot[0] && iSlot[iSearchLength - 1])
 			{
 			// check possibility of wrapping
 
 			TInt i = 1;
 			while (iSlot[i]) ++i;  // find first hole
-			TInt wrappedLength = iSeqLength[KArrayLength - 1] + iSeqLength[i - 1];
+			TUint wrappedLength = iSeqLength[iSearchLength - 1] + iSeqLength[i - 1];
 			if (wrappedLength > iLongestLength)
 				{
 				iLongestLength = wrappedLength;
-				iLongestStart = KArrayLength - iSeqLength[KArrayLength - 1];
+				iLongestStart = iSearchLength - iSeqLength[iSearchLength - 1];
 				}
 			}		
 
-		iLongestLength = Min(iLongestLength, KMaxLength);
-
-		__NK_ASSERT_DEBUG(iLongestStart >= 0 && iLongestStart < KArrayLength);
-		__NK_ASSERT_DEBUG(iLongestStart + iLongestLength < 2 * KArrayLength);
-
-		TInt len = Min(iLongestLength, KArrayLength - iLongestStart);
+		iLongestLength = MinU(iLongestLength, iTargetLength);
+
+		__NK_ASSERT_DEBUG(iLongestStart < iSearchLength);
+		__NK_ASSERT_DEBUG(iLongestStart + iLongestLength < 2 * iSearchLength);
+
+		TUint len = MinU(iLongestLength, iSearchLength - iLongestStart);
 		wordmove(aObjectsOut, &iSlot[iLongestStart], len * sizeof(T*));
 		wordmove(aObjectsOut + len, &iSlot[0], (iLongestLength - len) * sizeof(T*));
 		
@@ -634,19 +663,22 @@
 		}
 
 private:
-	T* iSlot[KArrayLength];
-	TInt8 iSeqLength[KArrayLength];
-	TInt iLongestStart;
-	TInt iLongestLength;
+	TUint iTargetLength;
+	TUint iSearchLength;
+	TUint iLongestStart;
+	TUint iLongestLength;
+	T* iSlot[KMaxSearchLength];
+	TUint8 iSeqLength[KMaxSearchLength];
 	};
 
-TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut)
+
+TInt DPager::SelectSequentialPagesToClean(SPageInfo** aPageInfosOut)
 	{
-	// select up to KMaxPagesToClean oldest dirty pages with sequential page colours
+	// select up to iPagesToClean oldest dirty pages with sequential page colours
 	
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 
-	TSequentialColourSelector<SPageInfo, KMaxPagesToClean> selector;
+	TSequentialColourSelector<SPageInfo, KMaxPagesToClean> selector(iPagesToClean);
 
 	SDblQueLink* link = iOldestDirtyList.Last();
 	while (link != &iOldestDirtyList.iA)
@@ -668,15 +700,14 @@
 	return selector.FindLongestRun(aPageInfosOut);
 	}
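
How the selector works: a candidate of colour c may only occupy slots c, c + KPageColourCount,
c + 2*KPageColourCount, ..., so a run of consecutively filled slots corresponds to pages whose
colours are sequential, which is what a contiguous write wants. A hypothetical usage sketch,
assuming KPageColourCount == 4 and illustrative SPageInfo pointers:

	// Not part of the changeset; shows the slot bookkeeping only.
	TSequentialColourSelector<SPageInfo, KMaxPagesToClean> selector(4);
	selector.AddCandidate(pageColour2, 2);  // fills slot 2: run length 1
	selector.AddCandidate(pageColour3, 3);  // fills slot 3: run [2..3]
	selector.AddCandidate(pageColour0, 0);  // fills slot 0: run length 1
	selector.AddCandidate(pageColour1, 1);  // slot 1 joins 0 to [2..3]: run [0..3]
	SPageInfo* run[KMaxPagesToClean];
	TUint n = selector.FindLongestRun(run); // n == 4, pages in colour order 0,1,2,3
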
 
-#else
-
-TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut)
+
+TInt DPager::SelectOldestPagesToClean(SPageInfo** aPageInfosOut)
 	{
-	// no page colouring restrictions, so just take up to KMaxPagesToClean oldest dirty pages
+	// select up to iPagesToClean oldest dirty pages
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
-	TInt pageCount = 0;
+	TUint pageCount = 0;
 	SDblQueLink* link = iOldestDirtyList.Last();
-	while (link != &iOldestDirtyList.iA && pageCount < KMaxPagesToClean)
+	while (link != &iOldestDirtyList.iA && pageCount < iPagesToClean)
 		{
 		SPageInfo* pi = SPageInfo::FromLink(link);
 		if (!pi->IsWritable())
@@ -691,20 +722,29 @@
 	return pageCount;
 	}
 
-#endif
-
 
 TInt DPager::CleanSomePages(TBool aBackground)
 	{
+	TRACE(("DPager::CleanSomePages"));
+	
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
 	// ram alloc lock may or may not be held
 
 	SPageInfo* pageInfos[KMaxPagesToClean];
-	TInt pageCount = SelectPagesToClean(&pageInfos[0]);
+	TInt pageCount;
+	if (iCleanInSequence)
+		pageCount = SelectSequentialPagesToClean(&pageInfos[0]);
+	else
+		pageCount = SelectOldestPagesToClean(&pageInfos[0]);
 	
 	if (pageCount == 0)
+		{
+		TRACE2(("DPager::CleanSomePages no pages to clean"));
+		TRACE2(("  page counts %d, %d, %d, %d",
+				iYoungCount, iOldCount, iOldestCleanCount, iOldestDirtyCount));
 		return 0;
+		}
 	
 	TheDataPagedMemoryManager->CleanPages(pageCount, pageInfos, aBackground);
 
@@ -724,6 +764,8 @@
 			}
 		}
 
+	TRACE2(("DPager::CleanSomePages cleaned %d pages", pageCount));
+
 	return pageCount;
 	}
 
@@ -737,7 +779,6 @@
 
 TInt DPager::RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
 	{
-	TRACE(("DPager::RestrictPage(0x%08x,%d)",aPageInfo,aRestriction));
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 
 	TInt r;
@@ -763,7 +804,6 @@
 		MmuLock::Lock();
 		}
 
-	TRACE(("DPager::RestrictPage returns %d",r));
 	return r;
 	}
 
@@ -814,28 +854,25 @@
 
 TInt DPager::DiscardAndAllocPage(SPageInfo* aPageInfo, TZonePageType aPageType)
 	{
-	TInt r = DiscardPage(aPageInfo, KRamZoneInvalidId, EFalse);
+	TInt r = DiscardPage(aPageInfo, KRamZoneInvalidId, M::EMoveDisMoveDirty);
 	if (r == KErrNone)
 		{
 		TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType);
 		}
-	// Flash the ram alloc lock as we may have had to write a page out to swap.
-	RamAllocLock::Unlock();
-	RamAllocLock::Lock();
 	return r;
 	}
 
 
-static TBool DiscardCanStealPage(SPageInfo* aOldPageInfo, TBool aBlockRest)
+static TBool DiscardCanStealPage(SPageInfo* aOldPageInfo, TBool aMoveDirty)
 	{
  	// If the page is pinned or if the page is dirty and a general defrag is being performed then
 	// don't attempt to steal it
 	return aOldPageInfo->Type() == SPageInfo::EUnused ||
-		(aOldPageInfo->PagedState() != SPageInfo::EPagedPinned && (!aBlockRest || !aOldPageInfo->IsDirty()));	
+		(aOldPageInfo->PagedState() != SPageInfo::EPagedPinned && (!aMoveDirty || !aOldPageInfo->IsDirty()));	
 	}
 
 
-TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest)
+TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TUint aMoveDisFlags)
 	{
 	// todo: assert MmuLock not released
 	
@@ -843,8 +880,10 @@
 	
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
-
-	if (!DiscardCanStealPage(aOldPageInfo, aBlockRest))
+	TBool moveDirty = (aMoveDisFlags & M::EMoveDisMoveDirty) != 0;
+	TBool blockRest = (aMoveDisFlags & M::EMoveDisBlockRest) != 0;
+
+	if (!DiscardCanStealPage(aOldPageInfo, moveDirty))
 		{
 		// The page is pinned or is dirty and this is a general defrag so move the page.
 		DMemoryObject* memory = aOldPageInfo->Owner();
@@ -854,7 +893,7 @@
 		MmuLock::Unlock();
 		TPhysAddr newAddr;
 		TRACE2(("DPager::DiscardPage delegating pinned/dirty page to manager"));
-		TInt r = memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest);
+		TInt r = memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, blockRest);
 		TRACE(("< DPager::DiscardPage %d", r));
 		return r;
 		}
@@ -875,7 +914,7 @@
 			{
 			// Allocate a new page for the live list as it has reached its minimum size.
 			TUint flags = EMemAttNormalCached | Mmu::EAllocNoWipe;
-			newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)flags, aBlockZoneId, aBlockRest);
+			newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)flags, aBlockZoneId, blockRest);
 			if (!newPageInfo)
 				{
 				TRACE(("< DPager::DiscardPage KErrNoMemory"));
@@ -894,7 +933,7 @@
 
 		// Re-acquire the mmulock and re-check that the page is not pinned or dirty.
 		MmuLock::Lock();
-		if (!DiscardCanStealPage(aOldPageInfo, aBlockRest))
+		if (!DiscardCanStealPage(aOldPageInfo, moveDirty))
 			{
 			// Page is now pinned or dirty so give up as it is in use.
 			r = KErrInUse;
@@ -1005,12 +1044,26 @@
 	}
 
 
-void DPager::ReturnPageToSystem()
+TBool DPager::TryReturnOldestPageToSystem()
 	{
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
-
-	ReturnPageToSystem(*StealOldestPage());
+	__NK_ASSERT_DEBUG(iNumberOfFreePages>0);
+
+	SPageInfo* pageInfo = NULL;
+	if (TryStealOldestPage(pageInfo) == KErrNone)
+		{
+		// TryStealOldestPage may have released the MmuLock, so check there are still enough pages
+		// to remove one from the live list
+		if (iNumberOfFreePages>0)
+			{
+			ReturnPageToSystem(*pageInfo);
+			return ETrue;
+			}
+		else
+			AddAsFreePage(pageInfo);
+		}
+	return EFalse;
 	}
 
 
@@ -1039,7 +1092,6 @@
 
 SPageInfo* DPager::PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags)
 	{
-	TBool pageCleaningLockHeld = EFalse;
 	SPageInfo* pageInfo;
 	TPhysAddr pagePhys;
 	TInt r = KErrGeneral;
@@ -1066,62 +1118,18 @@
 		MmuLock::Lock();
 		}
 
-	// try stealing a clean page...
-	if (iOldestCleanCount)
-		goto try_steal_oldest_page;
-
-	// see if we can clean multiple dirty pages in one go...
-	if (KMaxPagesToClean > 1 && iOldestDirtyCount > 1)
-		{
-		// if we don't hold the page cleaning mutex then temporarily release ram alloc mutex and
-		// acquire page cleaning mutex; if we hold it already just proceed
-		if (!pageCleaningLockHeld)
-			{
-			MmuLock::Unlock();
-			RamAllocLock::Unlock();
-			PageCleaningLock::Lock();			
-			MmuLock::Lock();
-			}
-		
-		// there may be clean pages now if we've waited on the page cleaning mutex, if so don't
-		// bother cleaning but just restart
-		if (iOldestCleanCount == 0)
-			CleanSomePages(EFalse);
-		
-		if (!pageCleaningLockHeld)
-			{
-			MmuLock::Unlock();
-			PageCleaningLock::Unlock();			
-			RamAllocLock::Lock();
-			MmuLock::Lock();
-			}
-		
-		if (iOldestCleanCount > 0)
-			goto find_a_page;
-		}
-
-	// as a last resort, steal a page from the live list...
-	
+	// otherwise steal a page from the live list...
 try_steal_oldest_page:
 	__NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount);
 	r = TryStealOldestPage(pageInfo);
-	// if this fails we restart whole process
-	if (r < KErrNone)
+	
+	// if this fails we restart the whole process.
+	// failure can be either KErrInUse if the page was used while we were stealing, or 1 to
+	// indicate that some pages were cleaned and the operation should be restarted
+	if (r != KErrNone)
 		goto find_a_page;
 
-	// if we need to clean, acquire page cleaning mutex for life of this function
-	if (r == 1)
-		{
-		__NK_ASSERT_ALWAYS(!pageCleaningLockHeld);
-		MmuLock::Unlock();
-		PageCleaningLock::Lock();
-		MmuLock::Lock();
-		pageCleaningLockHeld = ETrue;
-		goto find_a_page;		
-		}
-
 	// otherwise we're done!
-	__NK_ASSERT_DEBUG(r == KErrNone);
 	MmuLock::Unlock();
 
 	// make page state same as a freshly allocated page...
@@ -1129,8 +1137,6 @@
 	TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags);
 
 done:
-	if (pageCleaningLockHeld)
-		PageCleaningLock::Unlock();
 	RamAllocLock::Unlock();
 
 	return pageInfo;
@@ -1146,8 +1152,8 @@
 	MmuLock::Lock();
 	while(aNumPages>0 && (TInt)NumberOfFreePages()>=aNumPages)
 		{
-		ReturnPageToSystem();
-		--aNumPages;
+		if (TryReturnOldestPageToSystem())
+			--aNumPages;
 		}
 	MmuLock::Unlock();
 
@@ -1329,61 +1335,89 @@
 void DPager::BalanceAges()
 	{
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
-	TBool restrictPage = EFalse;
-	SPageInfo* pageInfo = NULL;
-	TUint oldestCount = iOldestCleanCount + iOldestDirtyCount;
-	if((iOldCount + oldestCount) * iYoungOldRatio < iYoungCount)
+	TBool retry;
+	do
 		{
-		// Need more old pages so make one young page into an old page...
-		__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
-		__NK_ASSERT_DEBUG(iYoungCount);
-		SDblQueLink* link = iYoungList.Last()->Deque();
-		--iYoungCount;
-
-		pageInfo = SPageInfo::FromLink(link);
-		pageInfo->SetPagedState(SPageInfo::EPagedOld);
-
-		iOldList.AddHead(link);
-		++iOldCount;
-
-		Event(EEventPageAged,pageInfo);
-		// Delay restricting the page until it is safe to release the MmuLock.
-		restrictPage = ETrue;
-		}
-
-	// Check we have enough oldest pages.
-	if (oldestCount < KMaxOldestPages &&
-		oldestCount * iOldOldestRatio < iOldCount)
-		{
-		__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
-		__NK_ASSERT_DEBUG(iOldCount);
-		SDblQueLink* link = iOldList.Last()->Deque();
-		--iOldCount;
-
-		SPageInfo* oldestPageInfo = SPageInfo::FromLink(link);
-		if (oldestPageInfo->IsDirty())
+		retry = EFalse;
+		TBool restrictPage = EFalse;
+		SPageInfo* pageInfo = NULL;
+		TUint oldestCount = iOldestCleanCount + iOldestDirtyCount;
+		if((iOldCount + oldestCount) * iYoungOldRatio < iYoungCount)
+			{
+			// Need more old pages so make one young page into an old page...
+			__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
+			__NK_ASSERT_DEBUG(iYoungCount);
+			SDblQueLink* link = iYoungList.Last()->Deque();
+			--iYoungCount;
+
+			pageInfo = SPageInfo::FromLink(link);
+			pageInfo->SetPagedState(SPageInfo::EPagedOld);
+
+			iOldList.AddHead(link);
+			++iOldCount;
+
+			Event(EEventPageAged,pageInfo);
+			// Delay restricting the page until it is safe to release the MmuLock.
+			restrictPage = ETrue;
+			}
+
+		// Check we have enough oldest pages.
+		if (oldestCount < iMaxOldestPages &&
+			oldestCount * iOldOldestRatio < iOldCount)
 			{
-			oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestDirty);
-			iOldestDirtyList.AddHead(link);
-			++iOldestDirtyCount;
-			PageCleaner::NotifyPagesToClean();
-			Event(EEventPageAgedDirty,oldestPageInfo);
+			__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
+			__NK_ASSERT_DEBUG(iOldCount);
+			SDblQueLink* link = iOldList.Last()->Deque();
+			--iOldCount;
+
+			SPageInfo* oldestPageInfo = SPageInfo::FromLink(link);
+			if (oldestPageInfo->IsDirty())
+				{
+				oldestPageInfo->SetOldestPage(SPageInfo::EPagedOldestDirty);
+				iOldestDirtyList.AddHead(link);
+				++iOldestDirtyCount;
+				PageCleaner::NotifyPagesToClean();
+				Event(EEventPageAgedDirty,oldestPageInfo);
+				}
+			else
+				{
+				oldestPageInfo->SetOldestPage(SPageInfo::EPagedOldestClean);
+				iOldestCleanList.AddHead(link);
+				++iOldestCleanCount;
+				Event(EEventPageAgedClean,oldestPageInfo);
+				}
 			}
-		else
+
+		if (restrictPage)
 			{
-			oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
-			iOldestCleanList.AddHead(link);
-			++iOldestCleanCount;
-			Event(EEventPageAgedClean,oldestPageInfo);
+			// Make the recently aged old page inaccessible.  This is done last as it will release
+			// the MmuLock and therefore the page counts may otherwise change.
+			TInt r = RestrictPage(pageInfo,ERestrictPagesNoAccessForOldPage);
+
+			if (r == KErrInUse)
+				{
+				SPageInfo::TPagedState state = pageInfo->PagedState();
+				if (state == SPageInfo::EPagedOld ||
+					state == SPageInfo::EPagedOldestClean ||
+					state == SPageInfo::EPagedOldestDirty)
+					{
+					// The restrict operation failed, but the page was left in an old state.  This
+					// can happen when:
+					//  
+					//  - pages are in the process of being pinned - the mapping will veto the
+					//    restriction
+					//  - pages are rejuvenated and then quickly become old again
+					//
+					// In the second instance the page will be needlessly rejuvenated because we
+					// can't tell that it has actually been restricted by another thread.
+					RemovePage(pageInfo);
+					AddAsYoungestPage(pageInfo);
+					retry = ETrue;
+					}
+				}
 			}
 		}
-
-	if (restrictPage)
-		{
-		// Make the recently aged old page inaccessible.  This is done last as it 
-		// will release the MmuLock and therefore the page counts may otherwise change.
-		RestrictPage(pageInfo,ERestrictPagesNoAccessForOldPage);
-		}
+	while (retry);
 	}
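
The retry handles a window that opens because RestrictPage() releases the MmuLock: another
thread can rejuvenate and even re-age the page before the restriction fails. A timeline of the
race being handled:

	// Thread A                                  Thread B
	// BalanceAges() ages page P
	// RestrictPage(P)   [MmuLock released]
	//                                           fault rejuvenates P
	//                                           P ages again (state is 'old')
	// restriction returns KErrInUse; P looks
	// old but may not actually be restricted
	// -> rejuvenate P and retry the aging
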
 
 
@@ -1392,7 +1426,7 @@
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 	while(HaveTooManyPages())
-		ReturnPageToSystem();
+		TryReturnOldestPageToSystem();
 	}
 
 
@@ -1684,6 +1718,8 @@
 
 void DPager::Pin(SPageInfo* aPageInfo, TPinArgs& aPinArgs)
 	{
+	TRACE(("DPager::Pin %08x", aPageInfo->PhysAddr()));
+	
 	__ASSERT_CRITICAL;
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 	__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(1));
@@ -1989,7 +2025,10 @@
 
 TInt DPager::ResizeLiveList(TUint aMinimumPageCount, TUint aMaximumPageCount)
 	{
-	TRACE(("DPager::ResizeLiveList(%d,%d) current young=%d old=%d min=%d free=%d max=%d",aMinimumPageCount,aMaximumPageCount,iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
+	TRACE(("DPager::ResizeLiveList(%d,%d) current: %d %d %d %d, %d %d %d",
+		   aMinimumPageCount,aMaximumPageCount,
+		   iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount,
+		   iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
 	__NK_ASSERT_DEBUG(CacheInitialised());
 
 	if(!aMaximumPageCount)
@@ -2003,10 +2042,15 @@
 	// Min must not be greater than max...
 	if(aMinimumPageCount>aMaximumPageCount)
 		return KErrArgument;
-
+	
 	NKern::ThreadEnterCS();
 	RamAllocLock::Lock();
 
+	// We must hold the PageCleaningLock, otherwise TryStealOldestPage will release the
+	// RamAllocLock while waiting for it.  Note this method is not used in production, so it's OK
+	// to hold both locks for longer than would otherwise happen.
+	PageCleaningLock::Lock();
+
 	MmuLock::Lock();
 
 	__NK_ASSERT_ALWAYS(iYoungOldRatio);
@@ -2026,56 +2070,55 @@
 		iMaximumPageCount = aMaximumPageCount;
 
 	// Reduce iMinimumPageCount?
-	TInt spare = iMinimumPageCount-aMinimumPageCount;
-	if(spare>0)
+	if(aMinimumPageCount < iMinimumPageCount)
 		{
-		iMinimumPageCount -= spare;
-		iNumberOfFreePages += spare;
+		iNumberOfFreePages += iMinimumPageCount - aMinimumPageCount;
+		iMinimumPageCount = aMinimumPageCount;
 		}
 
 	// Increase iMinimumPageCount?
 	TInt r=KErrNone;
-	while(iMinimumPageCount<aMinimumPageCount)
+	while(aMinimumPageCount > iMinimumPageCount)
 		{
-		TUint newMin = aMinimumPageCount;
-		TUint maxMin = iMinimumPageCount+iNumberOfFreePages;
-		if(newMin>maxMin)
-			newMin = maxMin;
-
-		TUint delta = newMin-iMinimumPageCount;
-		if(delta)
+		TUint newMin = MinU(aMinimumPageCount, iMinimumPageCount + iNumberOfFreePages);
+		
+		if (newMin == iMinimumPageCount)
 			{
-			iMinimumPageCount = newMin;
-			iNumberOfFreePages -= delta;
-			continue;
+			// have to add pages before we can increase minimum page count
+			if(!TryGrowLiveList())
+				{
+				r=KErrNoMemory;
+				break;
+				}
 			}
-
-		if(!TryGrowLiveList())
+		else
 			{
-			r=KErrNoMemory;
-			break;
+			iNumberOfFreePages -= newMin - iMinimumPageCount;
+			iMinimumPageCount = newMin;
 			}
 		}
 
 	// Reduce iMaximumPageCount?
-	while(iMaximumPageCount>aMaximumPageCount)
+	while(aMaximumPageCount < iMaximumPageCount)
 		{
-		TUint newMax = aMaximumPageCount;
-		TUint minMax = iMinimumPageCount+iNumberOfFreePages;
-		if(newMax<minMax)
-			newMax = minMax;
-
-		TUint delta = iMaximumPageCount-newMax;
-		if(delta)
+		TUint newMax = MaxU(aMaximumPageCount, iMinimumPageCount + iNumberOfFreePages);
+
+		if (newMax == iMaximumPageCount)
+			{
+			// have to remove pages before we can reduce maximum page count
+			TryReturnOldestPageToSystem();
+			}
+		else
 			{
 			iMaximumPageCount = newMax;
-			continue;
 			}
-
-		ReturnPageToSystem();
 		}
-
-	TRACE(("DPager::ResizeLiveList end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
+	
+	TRACE(("DPager::ResizeLiveList end: %d %d %d %d, %d %d %d",
+		   iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount,
+		   iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
+	
+	__NK_ASSERT_DEBUG((iMinimumPageCount + iNumberOfFreePages) <= iMaximumPageCount);
 
 #ifdef BTRACE_KERNEL_MEMORY
 	BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,iMinimumPageCount << KPageShift);
@@ -2083,6 +2126,7 @@
 
 	MmuLock::Unlock();
 
+	PageCleaningLock::Unlock();
 	RamAllocLock::Unlock();
 	NKern::ThreadLeaveCS();
 
@@ -2090,6 +2134,43 @@
 	}
 
 
+// Return the number of oldest pages the live list should be allowed to hold so that
+// aPagesToClean pages can be cleaned at a time; sequential cleaning needs a larger window of
+// candidates in which to find colour-sequential runs.
+TUint RequiredOldestPages(TUint aPagesToClean, TBool aCleanInSequence)
+	{
+	return aCleanInSequence ? aPagesToClean * 8 : aPagesToClean;
+	}
+
+
+void DPager::SetPagesToClean(TUint aPagesToClean)
+	{
+	TRACE(("WDP: Pager will attempt to clean %d pages", aPagesToClean));
+	__NK_ASSERT_ALWAYS(aPagesToClean > 0 && aPagesToClean <= KMaxPagesToClean);
+	MmuLock::Lock();
+	iPagesToClean = aPagesToClean;
+	iMaxOldestPages = MaxU(KDefaultMaxOldestPages,
+						   RequiredOldestPages(iPagesToClean, iCleanInSequence));
+	MmuLock::Unlock();
+	TRACE(("WDP: Maximum %d oldest pages", iMaxOldestPages));	
+	}
+
+
+TUint DPager::PagesToClean()
+	{
+	return iPagesToClean;
+	}
+
+
+void DPager::SetCleanInSequence(TBool aCleanInSequence)
+	{
+	TRACE(("WDP: Clean pages in colour sequence set to %d", aCleanInSequence));
+	MmuLock::Lock();
+	iCleanInSequence = aCleanInSequence;
+	iMaxOldestPages = MaxU(KDefaultMaxOldestPages,
+						   RequiredOldestPages(iPagesToClean, iCleanInSequence));
+	MmuLock::Unlock();	
+	TRACE(("WDP: Maximum %d oldest pages", iMaxOldestPages));
+	}
+
+
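
These setters are presumably invoked when the data paging device is installed, using the
device's declared write preferences; a hedged sketch of the expected wiring (the device-side
names here are assumptions, not part of this change):

	// Hypothetical caller during paging device installation:
	TUint preferred = 1u << device->iPreferredWriteShift;        // assumed field
	ThePager.SetPagesToClean(MinU(preferred, KMaxPagesToClean));
	ThePager.SetCleanInSequence(devicePrefersSequentialWrites);  // assumed flag
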
 // WARNING THIS METHOD MAY HOLD THE RAM ALLOC LOCK FOR EXCESSIVE PERIODS.  DON'T USE THIS IN ANY PRODUCTION CODE.
 void DPager::FlushAll()
 	{
@@ -2141,13 +2222,13 @@
 		}
 	while(piMap<piMapEnd);
 	MmuLock::Unlock();
+	PageCleaningLock::Unlock();
 
 	// reduce live page list to a minimum
 	while(GetFreePages(1)) {}; 
 
 	TRACE(("DPager::FlushAll() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
 
-	PageCleaningLock::Unlock();
 	RamAllocLock::Unlock();
 	NKern::ThreadLeaveCS();
 	}
@@ -2338,6 +2419,43 @@
 		return KErrNone;
 #endif
 
+	case EVMHalGetPhysicalAccessSupported:
+		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
+			return KErrNotSupported;
+		return GetPhysicalAccessSupported();
+		
+	case EVMHalGetUsePhysicalAccess:
+		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
+			return KErrNotSupported;
+		return GetUsePhysicalAccess();
+
+	case EVMHalSetUsePhysicalAccess:
+		if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetUsePhysicalAccess)")))
+			K::UnlockedPlatformSecurityPanic();
+		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
+			return KErrNotSupported;
+		if ((TUint)a1 > 1)
+			return KErrArgument;
+		SetUsePhysicalAccess((TBool)a1);
+		return KErrNone;
+		
+	case EVMHalGetPreferredDataWriteSize:
+		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
+			return KErrNotSupported;
+		return GetPreferredDataWriteSize();
+		
+	case EVMHalGetDataWriteSize:
+		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
+			return KErrNotSupported;
+		return __e32_find_ms1_32(ThePager.PagesToClean());
+		
+	case EVMHalSetDataWriteSize:
+		if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetDataWriteSize)")))
+			K::UnlockedPlatformSecurityPanic();
+		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
+			return KErrNotSupported;
+		return SetDataWriteSize((TUint)a1);
+	
 	default:
 		return KErrNotSupported;
 		}
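
For completeness, the new functions are reachable from the user side through the VM HAL group;
a sketch, assuming the new EVMHal enumerators are published in the usual VM HAL header, a
data-paging ROM, and WriteDeviceData capability for the setter:

	// Illustrative user-side calls:
	#include <e32svr.h>
	// request writes of 1 << 2 == 4 pages at a time...
	TInt r = UserSvr::HalFunction(EHalGroupVM, EVMHalSetDataWriteSize, (TAny*)2, 0);
	// read back the current write size as log2(pages)...
	TInt shift = UserSvr::HalFunction(EHalGroupVM, EVMHalGetDataWriteSize, 0, 0);
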
@@ -2390,151 +2508,45 @@
 //
 
 //
-// DPagingRequest
+// DPagingRequestBase
 //
 
-DPagingRequest::DPagingRequest()
-	: iMutex(NULL), iUseRegionCount(0)
-	{
-	}
-
-
-void DPagingRequest::SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
-	{
-	__ASSERT_SYSTEM_LOCK;
-	__NK_ASSERT_DEBUG(iUseRegionCount == 0);
-	__NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages);
-	for (TUint i = 0 ; i < aCount ; ++i)
-		{
-		iUseRegionMemory[i] = aMemory;
-		iUseRegionIndex[i] = aIndex + i;		
-		}
-	iUseRegionCount = aCount;
-	}
-
-
-void DPagingRequest::SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
-	{
-	__ASSERT_SYSTEM_LOCK;
-	__NK_ASSERT_DEBUG(iUseRegionCount == 0);
-	__NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages);
-	for (TUint i = 0 ; i < aCount ; ++i)
-		{
-		iUseRegionMemory[i] = aMemory[i];
-		iUseRegionIndex[i] = aIndex[i];
-		}
-	iUseRegionCount = aCount;
-	}
-
-
-void DPagingRequest::ResetUse()
-	{
-	__ASSERT_SYSTEM_LOCK;
-	__NK_ASSERT_DEBUG(iUseRegionCount > 0);
-	iUseRegionCount = 0;
-	}
-
-
-TBool DPagingRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
-	{
-	if (iUseRegionCount != aCount)
-		return EFalse;
-	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
-		{
-		if (iUseRegionMemory[i] != aMemory || iUseRegionIndex[i] != aIndex + i)
-			return EFalse;
-		}
-	return ETrue;
-	}
-
-
-TBool DPagingRequest::CheckUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
-	{
-	if (iUseRegionCount != aCount)
-		return EFalse;
-	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
-		{
-		if (iUseRegionMemory[i] != aMemory[i] || iUseRegionIndex[i] != aIndex[i])
-			return EFalse;
-		}
-	return ETrue;
-	}
-
-
- TBool DPagingRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
-	{
-	// note this could be optimised as most of the time we will be checking read/read collusions,
-	// both of which will be contiguous
-	__ASSERT_SYSTEM_LOCK;
-	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
-		{
-		if (iUseRegionMemory[i] == aMemory &&
-			TUint(iUseRegionIndex[i] - aIndex) < aCount)
-			return ETrue;
-		}
-	return EFalse;
-	}
-
-
-TLinAddr DPagingRequest::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages)
+
+TLinAddr DPagingRequestBase::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages)
 	{
 	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
 	return iTempMapping.Map(aPages,aCount,aColour);
 	}
 
 
-void DPagingRequest::UnmapPages(TBool aIMBRequired)
+void DPagingRequestBase::UnmapPages(TBool aIMBRequired)
 	{
 	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
 	iTempMapping.Unmap(aIMBRequired);
 	}
 
-//
-// DPoolPagingRequest
-//
-
-DPoolPagingRequest::DPoolPagingRequest(DPagingRequestPool::TGroup& aPoolGroup) :
-	iPoolGroup(aPoolGroup)
-	{
-	}
-
-
-void DPoolPagingRequest::Release()
-	{
-	NKern::LockSystem();
-	ResetUse();
-	Signal();
-	}
-
-
-void DPoolPagingRequest::Wait()
-	{
-	__ASSERT_SYSTEM_LOCK;
-	++iUsageCount;
-	TInt r = iMutex->Wait();
-	__NK_ASSERT_ALWAYS(r == KErrNone);
-	}
-
-
-void DPoolPagingRequest::Signal()
-	{
-	__ASSERT_SYSTEM_LOCK;
-	iPoolGroup.Signal(this);
-	}
 
 //
 // DPageReadRequest
 //
 
+
 TInt DPageReadRequest::iAllocNext = 0;
 
+
+TUint DPageReadRequest::ReservedPagesRequired()
+	{
+	return iAllocNext*EMaxPages;
+	}
+
+
 DPageReadRequest::DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup) :
-	DPoolPagingRequest(aPoolGroup)
+	iPoolGroup(aPoolGroup)
 	{
-	// allocate space for mapping pages whilst they're being loaded...
 	iTempMapping.Alloc(EMaxPages);
 	}
 
+
 TInt DPageReadRequest::Construct()
 	{
 	// allocate id and mutex...
@@ -2568,6 +2580,65 @@
 	}
 
 
+void DPageReadRequest::Release()
+	{
+	NKern::LockSystem();
+	ResetUse();
+	Signal();
+	}
+
+
+void DPageReadRequest::Wait()
+	{
+	__ASSERT_SYSTEM_LOCK;
+	++iUsageCount;
+	TInt r = iMutex->Wait();
+	__NK_ASSERT_ALWAYS(r == KErrNone);
+	}
+
+
+void DPageReadRequest::Signal()
+	{
+	__ASSERT_SYSTEM_LOCK;
+	__NK_ASSERT_DEBUG(iUsageCount > 0);
+	if (--iUsageCount == 0)
+		iPoolGroup.iFreeList.AddHead(&iLink);
+	iMutex->Signal();
+	}
+
+
+void DPageReadRequest::SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+	{
+	__ASSERT_SYSTEM_LOCK;
+	__NK_ASSERT_DEBUG(aMemory != NULL && aCount <= EMaxPages);
+	__NK_ASSERT_DEBUG(iMemory == NULL);
+	iMemory = aMemory;
+	iIndex = aIndex;
+	iCount = aCount;
+	}
+
+
+void DPageReadRequest::ResetUse()
+	{
+	__ASSERT_SYSTEM_LOCK;
+	__NK_ASSERT_DEBUG(iMemory != NULL);
+	iMemory = NULL;
+	}
+
+
+TBool DPageReadRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+	{
+	__ASSERT_SYSTEM_LOCK;
+	return iMemory == aMemory && aIndex < iIndex + iCount && aIndex + aCount > iIndex;
+	}
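
The test above is the standard half-open interval overlap check: [aIndex, aIndex + aCount)
intersects [iIndex, iIndex + iCount) exactly when both inequalities hold. A worked check with
illustrative values:

	// iIndex == 10, iCount == 4             -> in-use region covers pages 10..13
	// aIndex == 13, aCount == 2: 13 < 14 && 15 > 10  -> collision
	// aIndex == 14, aCount == 1: 14 < 14 fails       -> no collision
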
+
+
+TBool DPageReadRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+	{
+	return iMemory == aMemory && iIndex == aIndex && iCount == aCount;
+	}
+
+
 //
 // DPageWriteRequest
 //
@@ -2576,8 +2647,7 @@
 DPageWriteRequest::DPageWriteRequest()
 	{
 	iMutex = ThePageCleaningLock;
-	// allocate space for mapping pages whilst they're being loaded...
-	iTempMapping.Alloc(KMaxPagesToClean);
+	iTempMapping.Alloc(EMaxPages);
 	}
 
 
@@ -2589,6 +2659,55 @@
 	}
 
 
+void DPageWriteRequest::SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
+	{
+	__ASSERT_SYSTEM_LOCK;
+	__NK_ASSERT_DEBUG(iUseRegionCount == 0);
+	__NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages);
+	for (TUint i = 0 ; i < aCount ; ++i)
+		{
+		iUseRegionMemory[i] = aMemory[i];
+		iUseRegionIndex[i] = aIndex[i];
+		}
+	iUseRegionCount = aCount;
+	}
+
+
+void DPageWriteRequest::ResetUse()
+	{
+	__ASSERT_SYSTEM_LOCK;
+	__NK_ASSERT_DEBUG(iUseRegionCount > 0);
+	iUseRegionCount = 0;
+	}
+
+
+TBool DPageWriteRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+	{
+	if (iUseRegionCount != aCount)
+		return EFalse;
+	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
+		{
+		if (iUseRegionMemory[i] != aMemory || iUseRegionIndex[i] != aIndex + i)
+			return EFalse;
+		}
+	return ETrue;
+	}
+
+
+TBool DPageWriteRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+	{
+	// note this could be optimised as most of the time we will be checking read/read collisions,
+	// both of which will be contiguous
+	__ASSERT_SYSTEM_LOCK;
+	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
+		{
+		if (iUseRegionMemory[i] == aMemory &&
+			TUint(iUseRegionIndex[i] - aIndex) < aCount)
+			return ETrue;
+		}
+	return EFalse;
+	}
+
 //
 // DPagingRequestPool
 //
@@ -2604,7 +2723,7 @@
 		TInt r = req->Construct();
 		__NK_ASSERT_ALWAYS(r==KErrNone);
 		iPageReadRequests.iRequests[i] = req;
-		iPageReadRequests.iFreeList.Add(req);
+		iPageReadRequests.iFreeList.Add(&req->iLink);
 		}
 
 	if (aWriteRequest)
@@ -2625,7 +2744,7 @@
 	{
 	NKern::LockSystem();
 
-	DPoolPagingRequest* req;
+	DPageReadRequest* req;
 	
 	// check for collision with existing write
 	if(iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount))
@@ -2686,19 +2805,19 @@
 DPagingRequestPool::TGroup::TGroup(TUint aNumRequests)
 	{
 	iNumRequests = aNumRequests;
-	iRequests = new DPoolPagingRequest*[aNumRequests];
+	iRequests = new DPageReadRequest*[aNumRequests];
 	__NK_ASSERT_ALWAYS(iRequests);
 	}
 
 
-DPoolPagingRequest* DPagingRequestPool::TGroup::FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+DPageReadRequest* DPagingRequestPool::TGroup::FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
 	{
 	__ASSERT_SYSTEM_LOCK;
-	DPoolPagingRequest** ptr = iRequests;
-	DPoolPagingRequest** ptrEnd = ptr+iNumRequests;
+	DPageReadRequest** ptr = iRequests;
+	DPageReadRequest** ptrEnd = ptr+iNumRequests;
 	while(ptr<ptrEnd)
 		{
-		DPoolPagingRequest* req = *ptr++;
+		DPageReadRequest* req = *ptr++;
 		if(req->IsCollisionContiguous(aMemory,aIndex,aCount))
 			return req;
 		}
@@ -2708,20 +2827,21 @@
 
 static TUint32 RandomSeed = 33333;
 
-DPoolPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+DPageReadRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
 	{
 	__NK_ASSERT_DEBUG(iNumRequests > 0);
 
 	// try using an existing request which collides with this region...
-	DPoolPagingRequest* req  = FindCollisionContiguous(aMemory,aIndex,aCount);
+	DPageReadRequest* req  = FindCollisionContiguous(aMemory,aIndex,aCount);
 	if(!req)
 		{
 		// use a free request...
-		req = (DPoolPagingRequest*)iFreeList.GetFirst();
-		if(req)
+		SDblQueLink* first = iFreeList.GetFirst();
+		if(first)
 			{
 			// free requests aren't being used...
-			__NK_ASSERT_DEBUG(req->iUsageCount == 0);
+			req = _LOFF(first, DPageReadRequest, iLink);
+			__NK_ASSERT_DEBUG(req->ThreadsWaiting() == 0);
 			}
 		else
 			{
@@ -2729,7 +2849,7 @@
 			RandomSeed = RandomSeed*69069+1; // next 'random' number
 			TUint index = (TUint64(RandomSeed) * TUint64(iNumRequests)) >> 32;
 			req = iRequests[index];
-			__NK_ASSERT_DEBUG(req->iUsageCount > 0); // we only pick random when none are free
+			__NK_ASSERT_DEBUG(req->ThreadsWaiting() > 0); // we only pick random when none are free
 			}
 		}
 
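The fallback picks a random in-use request with a multiply-shift rather than a modulo; a sketch
of why the mapping is uniform over [0, iNumRequests):

	// The 32-bit seed is treated as a fixed-point fraction in [0,1), so
	// multiplying by n and keeping the top 32 bits gives a value in [0, n)
	// without modulo bias; 69069 is the classic Marsaglia LCG multiplier.
	TUint32 seed = RandomSeed*69069+1;
	TUint index = (TUint64(seed) * TUint64(iNumRequests)) >> 32;
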
@@ -2740,17 +2860,6 @@
 	}
 
 
-void DPagingRequestPool::TGroup::Signal(DPoolPagingRequest* aRequest)
-	{
-	// if there are no threads waiting on the mutex then return it to the free pool...
-	__NK_ASSERT_DEBUG(aRequest->iUsageCount > 0);
-	if (--aRequest->iUsageCount==0)
-		iFreeList.AddHead(aRequest);
-
-	aRequest->iMutex->Signal();
-	}
-
-
 /**
 Register the specified paging device with the kernel.