kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp
branchRCL_3
changeset 26 c734af59ce98
parent 24 41f0cfe18c80
child 28 5b5d147c7838
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp	Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp	Tue May 11 17:28:22 2010 +0300
@@ -27,14 +27,14 @@
 #include "mpagearray.h"
 #include "mswap.h"
 #include "mthrash.h"
+#include "mpagecleaner.h"
+
 #include "cache_maintenance.inl"
 
 
 const TUint16 KDefaultYoungOldRatio = 3;
 const TUint16 KDefaultMinPages = 256;
-#ifdef _USE_OLDEST_LISTS
 const TUint16 KDefaultOldOldestRatio = 3;
-#endif
 
 const TUint KMinOldPages = 1;
 
@@ -43,18 +43,24 @@
 */
 const TUint	KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u;
 
-
+/*
+Limit the maximum number of oldest pages to bound the time taken by SelectPagesToClean(), which is
+called with the MmuLock held.
+*/
+const TUint KMaxOldestPages = 32;
+
+static DMutex* ThePageCleaningLock = NULL;
 
 DPager ThePager;
 
 
 DPager::DPager()
 	: iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0),
-	  iYoungCount(0),iOldCount(0),
-#ifdef _USE_OLDEST_LISTS
-	  iOldestCleanCount(0),
-#endif
+	  iYoungCount(0), iOldCount(0), iOldestCleanCount(0),
 	  iNumberOfFreePages(0), iReservePageCount(0), iMinimumPageLimit(0)
+#ifdef __DEMAND_PAGING_BENCHMARKS__
+	, iBenchmarkLock(TSpinLock::EOrderGenericIrqHigh3)
+#endif	  
 	{
 	}
 
@@ -102,13 +108,8 @@
 
 #ifdef __SMP__
 	// Adjust min page count so that all CPUs are guaranteed to make progress.
-	// NOTE: Can't use NKern::NumberOfCpus here because we haven't finished booting yet and will
-	// always have only one CPU running at this point...
-
-	// TODO: Before we can enable this the base test configuration needs
-	// updating to have a sufficient minimum page size...
-	//
-	// iMinYoungPages *= KMaxCpus;
+	TInt numberOfCpus = NKern::NumberOfCpus();
+	iMinYoungPages *= numberOfCpus;
 #endif
 
 	// A minimum young/old ratio of 1 means that we need at least twice iMinYoungPages pages...
@@ -123,11 +124,9 @@
 	iYoungOldRatio = KDefaultYoungOldRatio;
 	if(config.iYoungOldRatio)
 		iYoungOldRatio = config.iYoungOldRatio;
-#ifdef _USE_OLDEST_LISTS
 	iOldOldestRatio = KDefaultOldOldestRatio;
 	if(config.iSpare[2])
 		iOldOldestRatio = config.iSpare[2];
-#endif
 
 	// Set the minimum page counts...
 	iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
@@ -161,7 +160,6 @@
 		iMaximumPageCount = KAbsoluteMaxPageCount;
 	iInitMaximumPageCount = iMaximumPageCount;
 
-
 	TRACEB(("DPager::InitCache() live list min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio));
 
 	// Verify the page counts are valid.
@@ -179,11 +177,9 @@
 	TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages;
 	__NK_ASSERT_ALWAYS(iYoungOldRatio <= ratioLimit);
 
-#ifdef _USE_OLDEST_LISTS
 	// There should always be enough old pages to allow the oldest lists ratio.
 	TUint oldestCount = minOldAndOldest / (1 + iOldOldestRatio);
 	__NK_ASSERT_ALWAYS(oldestCount);
-#endif
 
 	iNumberOfFreePages = 0;
 	iNumberOfDirtyPages = 0;
@@ -193,13 +189,9 @@
 	// old list so don't allocate them again.
 	RamAllocLock::Lock();
 	iYoungCount = 0;
-#ifdef _USE_OLDEST_LISTS
 	iOldCount = 0;
 	iOldestDirtyCount = 0;
 	__NK_ASSERT_DEBUG(iOldestCleanCount == iReservePageCount);
-#else
-	__NK_ASSERT_DEBUG(iOldCount == iReservePageCount);
-#endif
 	Mmu& m = TheMmu;
 	for(TUint i = iReservePageCount; i < iMinimumPageCount; i++)
 		{
@@ -216,11 +208,7 @@
 	RamAllocLock::Unlock();
 
 	__NK_ASSERT_DEBUG(CacheInitialised());
-#ifdef _USE_OLDEST_LISTS
 	TRACEB(("DPager::InitCache() end with young=%d old=%d oldClean=%d oldDirty=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
-#else
-	TRACEB(("DPager::InitCache() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
-#endif
 	}
 
 
@@ -250,17 +238,12 @@
 		return EFalse;
 	if (!CheckList(&iYoungList.iA, iYoungCount))
 		return EFalse;
-
-#ifdef _USE_OLDEST_LISTS
 	if (!CheckList(&iOldestCleanList.iA, iOldestCleanCount))
 		return EFalse;
 	if (!CheckList(&iOldestDirtyList.iA, iOldestDirtyCount))
 		return EFalse;
 	TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d", iYoungCount, iOldCount, 
 			iOldestCleanCount, iOldestDirtyCount, iNumberOfFreePages));
-#else
-	TRACEP(("DP: y=%d o=%d f=%d", iYoungCount, iOldCount, iNumberOfFreePages));
-#endif //#ifdef _USE_OLDEST_LISTS
 	TraceCounts();
 #endif // #ifdef FMM_PAGER_CHECK_LISTS
 	return true;
@@ -268,16 +251,10 @@
 
 void DPager::TraceCounts()
 	{
-#ifdef _USE_OLDEST_LISTS
 	TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d min=%d max=%d ml=%d res=%d",
 		iYoungCount, iOldCount, iOldestCleanCount, iOldestDirtyCount, 
 		iNumberOfFreePages, iMinimumPageCount, iMaximumPageCount,
 		iMinimumPageLimit, iReservePageCount));
-#else
-	TRACEP(("DP: y=%d o=%d f=%d min=%d max=%d ml=%d res=%d",
-		iYoungCount, iOldCount, iNumberOfFreePages, iMinimumPageCount,
-		iMaximumPageCount, iMinimumPageLimit, iReservePageCount));
-#endif //#ifdef _USE_OLDEST_LISTS
 	}
 #endif //#ifdef _DEBUG
 
@@ -320,15 +297,9 @@
 	__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);
 
 	// add as oldest page...
-#ifdef _USE_OLDEST_LISTS
 	aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
 	iOldestCleanList.Add(&aPageInfo->iLink);
 	++iOldestCleanCount;
-#else
-	aPageInfo->SetPagedState(SPageInfo::EPagedOld);
-	iOldList.Add(&aPageInfo->iLink);
-	++iOldCount;
-#endif
 
 	Event(EEventPageInFree,aPageInfo);
 	}
@@ -357,7 +328,6 @@
 		--iOldCount;
 		break;
 
-#ifdef _USE_OLDEST_LISTS
 	case SPageInfo::EPagedOldestClean:
 		__NK_ASSERT_DEBUG(iOldestCleanCount);
 		aPageInfo->iLink.Deque();
@@ -369,7 +339,6 @@
 		aPageInfo->iLink.Deque();
 		--iOldestDirtyCount;
 		break;
-#endif
 
 	case SPageInfo::EPagedPinned:
 		// this can occur if a pinned mapping is being unmapped when memory is decommitted.
@@ -392,7 +361,10 @@
 
 	// Update the dirty page count as required...
 	if (aPageInfo->IsDirty())
+		{
+		aPageInfo->SetReadOnly();
 		SetClean(*aPageInfo);
+		}
 
 	if (iNumberOfFreePages > 0)
 		{// The paging cache is not at the minimum size so safe to let the 
@@ -403,15 +375,9 @@
 		}
 	// Need to hold onto this page as have reached the page cache limit.
 	// add as oldest page...
-#ifdef _USE_OLDEST_LISTS
 	aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
 	iOldestCleanList.Add(&aPageInfo->iLink);
 	++iOldestCleanCount;
-#else
-	aPageInfo->SetPagedState(SPageInfo::EPagedOld);
-	iOldList.Add(&aPageInfo->iLink);
-	++iOldCount;
-#endif
 
 	return KErrNone;
 	}
@@ -438,7 +404,6 @@
 		--iOldCount;
 		break;
 
-#ifdef _USE_OLDEST_LISTS
 	case SPageInfo::EPagedOldestClean:
 		__NK_ASSERT_DEBUG(iOldestCleanCount);
 		aPageInfo->iLink.Deque();
@@ -450,7 +415,6 @@
 		aPageInfo->iLink.Deque();
 		--iOldestDirtyCount;
 		break;
-#endif
 
 	case SPageInfo::EPagedPinned:
 		__NK_ASSERT_DEBUG(0);
@@ -521,52 +485,253 @@
 	}
 
 
-SPageInfo* DPager::StealOldestPage()
+TInt DPager::TryStealOldestPage(SPageInfo*& aPageInfoOut)
 	{
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 
+	// find oldest page in list...
+	SDblQueLink* link;
+	if (iOldestCleanCount)
+		{
+		__NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty());
+		link = iOldestCleanList.Last();
+		}
+	else if (iOldestDirtyCount)
+		{
+		__NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
+		link = iOldestDirtyList.Last();
+		}
+	else if (iOldCount)
+		{
+		__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
+		link = iOldList.Last();
+		}
+	else
+		{
+		__NK_ASSERT_DEBUG(iYoungCount);
+		__NK_ASSERT_ALWAYS(!iYoungList.IsEmpty());
+		link = iYoungList.Last();
+		}
+	SPageInfo* pageInfo = SPageInfo::FromLink(link);
+
+	if (pageInfo->IsDirty() && !PageCleaningLock::IsHeld())
+		return 1;
+
+	// try to steal it from owning object...
+	TInt r = StealPage(pageInfo);	
+	if (r == KErrNone)
+		{
+		BalanceAges();
+		aPageInfoOut = pageInfo;
+		}
+	
+	return r;
+	}
+
+
+SPageInfo* DPager::StealOldestPage()
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	TBool pageCleaningLockHeld = EFalse;
 	for(;;)
 		{
-		// find oldest page in list...
-		SDblQueLink* link;
-#ifdef _USE_OLDEST_LISTS
-		if (iOldestCleanCount)
+		SPageInfo* pageInfo = NULL;
+		TInt r = TryStealOldestPage(pageInfo);
+		
+		if (r == KErrNone)
 			{
-			__NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty());
-			link = iOldestCleanList.Last();
+			if (pageCleaningLockHeld)
+				{
+				MmuLock::Unlock();
+				PageCleaningLock::Unlock();
+				MmuLock::Lock();
+				}
+			return pageInfo;
+			}
+		else if (r == 1)
+			{
+			__NK_ASSERT_ALWAYS(!pageCleaningLockHeld);
+			MmuLock::Unlock();
+			PageCleaningLock::Lock();
+			MmuLock::Lock();
+			pageCleaningLockHeld = ETrue;
 			}
-		else if (iOldestDirtyCount)
+		// else retry...
+		}
+	}
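
(The return value 1 from TryStealOldestPage() is the "oldest page is dirty but the page cleaning
mutex is not held" case; StealOldestPage() above shows the canonical response.)  The MmuLock cannot
be held while waiting on the page cleaning mutex, so wherever this file needs that mutex
mid-operation it uses the same drop-and-retake idiom, a minimal sketch of which is:

    MmuLock::Unlock();          // can't block on the mutex with the MmuLock held
    PageCleaningLock::Lock();
    MmuLock::Lock();
    // ...any state examined under the old MmuLock is now stale and must be re-checked...

The same sequence appears again in DiscardPage() and PageInAllocPage() below.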
+
+#ifdef __CPU_CACHE_HAS_COLOUR
+
+template <class T, TInt maxObjects> class TSequentialColourSelector
+	{
+public:
+	static const TInt KMaxLength = maxObjects;
+	static const TInt KArrayLength = _ALIGN_UP(KMaxLength, KPageColourCount);
+	
+	FORCE_INLINE TSequentialColourSelector()
+		{
+		memclr(this, sizeof(*this));
+		}
+
+	FORCE_INLINE TBool FoundLongestSequence()
+		{
+		return iLongestLength >= KMaxLength;
+		}
+
+	FORCE_INLINE void AddCandidate(T* aObject, TInt aColour)
+		{
+		// allocate objects to slots based on colour
+		for (TInt i = aColour ; i < KArrayLength ; i += KPageColourCount)
 			{
-			__NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
-			link = iOldestDirtyList.Last();
+			if (!iSlot[i])
+				{
+				iSlot[i] = aObject;
+				iSeqLength[i] = i == 0 ? 1 : iSeqLength[i - 1] + 1;
+				TInt j = i + 1;
+				while(j < KArrayLength && iSeqLength[j])
+					iSeqLength[j++] += iSeqLength[i];
+				TInt currentLength = iSeqLength[j - 1];
+				if (currentLength > iLongestLength)
+					{
+					iLongestLength = currentLength;
+					iLongestStart = j - currentLength;
+					}
+				break;
+				}
 			}
-		else if (iOldCount)
-#else
-		if (iOldCount)
-#endif
+		}
+
+	FORCE_INLINE TInt FindLongestRun(T** aObjectsOut)
+		{
+		if (iLongestLength == 0)
+			return 0;
+
+		if (iLongestLength < KMaxLength && iSlot[0] && iSlot[KArrayLength - 1])
 			{
-			__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
-			link = iOldList.Last();
-			}
-		else
+			// check possibility of wrapping
+
+			TInt i = 1;
+			while (iSlot[i]) ++i;  // find first hole
+			TInt wrappedLength = iSeqLength[KArrayLength - 1] + iSeqLength[i - 1];
+			if (wrappedLength > iLongestLength)
+				{
+				iLongestLength = wrappedLength;
+				iLongestStart = KArrayLength - iSeqLength[KArrayLength - 1];
+				}
+			}		
+
+		iLongestLength = Min(iLongestLength, KMaxLength);
+
+		__NK_ASSERT_DEBUG(iLongestStart >= 0 && iLongestStart < KArrayLength);
+		__NK_ASSERT_DEBUG(iLongestStart + iLongestLength < 2 * KArrayLength);
+
+		TInt len = Min(iLongestLength, KArrayLength - iLongestStart);
+		wordmove(aObjectsOut, &iSlot[iLongestStart], len * sizeof(T*));
+		wordmove(aObjectsOut + len, &iSlot[0], (iLongestLength - len) * sizeof(T*));
+		
+		return iLongestLength;
+		}
+
+private:
+	T* iSlot[KArrayLength];
+	TInt8 iSeqLength[KArrayLength];
+	TInt iLongestStart;
+	TInt iLongestLength;
+	};
+
+TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut)
+	{
+	// select up to KMaxPagesToClean oldest dirty pages with sequential page colours
+	
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+
+	TSequentialColourSelector<SPageInfo, KMaxPagesToClean> selector;
+
+	SDblQueLink* link = iOldestDirtyList.Last();
+	while (link != &iOldestDirtyList.iA)
+		{
+		SPageInfo* pi = SPageInfo::FromLink(link);
+		if (!pi->IsWritable())  
 			{
-			__NK_ASSERT_DEBUG(iYoungCount);
-			__NK_ASSERT_ALWAYS(!iYoungList.IsEmpty());
-			link = iYoungList.Last();
+			// the page may be in the process of being restricted, stolen or decommitted, but don't
+			// check for this as it will occur infrequently and will be detected by CheckModified
+			// anyway
+			TInt colour = pi->Index() & KPageColourMask;
+			selector.AddCandidate(pi, colour);
+			if (selector.FoundLongestSequence())
+				break;
 			}
-		SPageInfo* pageInfo = SPageInfo::FromLink(link);
-
-		// steal it from owning object...
-		TInt r = StealPage(pageInfo);
-
-		BalanceAges();
-
-		if(r==KErrNone)
-			return pageInfo; // done
-
-		// loop back and try again
+		link = link->iPrev;
+		}
+	
+	return selector.FindLongestRun(aPageInfosOut);
+	}
+
+#else
+
+TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut)
+	{
+	// no page colouring restrictions, so just take up to KMaxPagesToClean oldest dirty pages
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	TInt pageCount = 0;
+	SDblQueLink* link = iOldestDirtyList.Last();
+	while (link != &iOldestDirtyList.iA && pageCount < KMaxPagesToClean)
+		{
+		SPageInfo* pi = SPageInfo::FromLink(link);
+		if (!pi->IsWritable())
+			{
+			// the page may be in the process of being restricted, stolen or decommitted, but don't
+			// check for this as it will occur infrequently and will be detected by CheckModified
+			// anyway
+			aPageInfosOut[pageCount++] = pi;
+			}
+		link = link->iPrev;
 		}
+	return pageCount;
+	}
+
+#endif
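
To make the run-merging in TSequentialColourSelector::AddCandidate() concrete, here is a simplified
standalone analogue (ordinary C++, not Symbian kernel code; the page ids and the four-colour
configuration are invented for the example).  seqLength[] holds the length of the filled run ending
at each slot, and filling a slot splices the run before it onto the run after it:

    // Simplified standalone analogue of TSequentialColourSelector's bookkeeping, assuming
    // four page colours and a maximum run of four pages.
    #include <cstdio>

    const int KPageColourCount = 4;
    const int KArrayLength = 4;              // _ALIGN_UP(max pages, colour count)

    int slot[KArrayLength];                  // 0 == empty, otherwise a page id
    int seqLength[KArrayLength];             // length of the filled run ending at each slot
    int longestStart, longestLength;

    void AddCandidate(int pageId, int colour)
        {
        for (int i = colour; i < KArrayLength; i += KPageColourCount)
            {
            if (!slot[i])
                {
                slot[i] = pageId;
                seqLength[i] = (i == 0) ? 1 : seqLength[i - 1] + 1;
                int j = i + 1;
                while (j < KArrayLength && seqLength[j])
                    seqLength[j++] += seqLength[i];  // splice onto the run that follows
                int currentLength = seqLength[j - 1];
                if (currentLength > longestLength)
                    {
                    longestLength = currentLength;
                    longestStart = j - currentLength;
                    }
                break;
                }
            }
        }

    int main()
        {
        // Oldest-first dirty pages 11..14 arrive with colours 1, 2, 3, 0:
        AddCandidate(11, 1);    // run {1},          longest = 1
        AddCandidate(12, 2);    // run {1,2},        longest = 2
        AddCandidate(13, 3);    // run {1,2,3},      longest = 3
        AddCandidate(14, 0);    // run {0,1,2,3},    longest = 4 == maximum, search stops
        printf("longest run: start=%d length=%d\n", longestStart, longestLength);
        return 0;
        }

Preferring a run of consecutive colours lets the chosen pages be mapped at consecutive,
colour-correct addresses in the write request's temporary mapping, so a multi-page clean can be
issued as one contiguous write.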
+
+
+TInt DPager::CleanSomePages(TBool aBackground)
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
+	// ram alloc lock may or may not be held
+
+	SPageInfo* pageInfos[KMaxPagesToClean];
+	TInt pageCount = SelectPagesToClean(&pageInfos[0]);
+	
+	if (pageCount == 0)
+		return 0;
+	
+	TheDataPagedMemoryManager->CleanPages(pageCount, pageInfos, aBackground);
+
+	for (TInt i = 0 ; i < pageCount ; ++i)
+		{
+		SPageInfo* pi = pageInfos[i];
+		if (pi)
+			{
+			__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EPagedOldestDirty && iOldestDirtyCount);
+			__NK_ASSERT_DEBUG(!pi->IsDirty() && !pi->IsWritable());
+		
+			pi->iLink.Deque();
+			iOldestCleanList.AddHead(&pi->iLink);
+			--iOldestDirtyCount;
+			++iOldestCleanCount;
+			pi->SetPagedState(SPageInfo::EPagedOldestClean);
+			}
+		}
+
+	return pageCount;
+	}
+
+
+TBool DPager::HasPagesToClean()
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	return iOldestDirtyCount > 0;
 	}
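
HasPagesToClean() and CleanSomePages(ETrue) are the hooks used by the background page cleaner
introduced with mpagecleaner.h above.  The cleaner itself is not part of this file; one plausible
shape for its loop, with the wake-up primitive left hypothetical, is:

    // Illustrative only: the real DPageCleaner lives in mpagecleaner.cpp and is not shown in
    // this changeset.  WaitForCleaningRequest() is a hypothetical stand-in for whatever
    // PageCleaner::NotifyPagesToClean() signals.
    void BackgroundCleanerLoop()
        {
        for (;;)
            {
            WaitForCleaningRequest();               // hypothetical wake-up
            PageCleaningLock::Lock();
            MmuLock::Lock();
            while (ThePager.HasPagesToClean())
                ThePager.CleanSomePages(ETrue);     // aBackground == ETrue
            MmuLock::Unlock();
            PageCleaningLock::Unlock();
            }
        }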
 
 
@@ -647,97 +812,158 @@
 	}
 
 
+TInt DPager::DiscardAndAllocPage(SPageInfo* aPageInfo, TZonePageType aPageType)
+	{
+	TInt r = DiscardPage(aPageInfo, KRamZoneInvalidId, EFalse);
+	if (r == KErrNone)
+		{
+		TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType);
+		}
+	// Flash the ram alloc lock as we may have had to write a page out to swap.
+	RamAllocLock::Unlock();
+	RamAllocLock::Lock();
+	return r;
+	}
+
+
+static TBool DiscardCanStealPage(SPageInfo* aOldPageInfo, TBool aBlockRest)
+	{
+ 	// If the page is pinned or if the page is dirty and a general defrag is being performed then
+	// don't attempt to steal it
+	return aOldPageInfo->Type() == SPageInfo::EUnused ||
+		(aOldPageInfo->PagedState() != SPageInfo::EPagedPinned && (!aBlockRest || !aOldPageInfo->IsDirty()));	
+	}
+
+
 TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest)
 	{
+	// todo: assert MmuLock not released
+	
+	TRACE(("> DPager::DiscardPage %08x", aOldPageInfo));
+	
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 
-	TInt r;
-	// If the page is pinned or if the page is dirty and a general defrag is being 
-	// performed then don't attempt to steal it.
-	if (aOldPageInfo->Type() != SPageInfo::EUnused && 
-		(aOldPageInfo->PagedState() == SPageInfo::EPagedPinned ||
-		(aBlockRest && aOldPageInfo->IsDirty())))
-		{// The page is pinned or is dirty and this is a general defrag so move the page.
+	if (!DiscardCanStealPage(aOldPageInfo, aBlockRest))
+		{
+		// The page is pinned or is dirty and this is a general defrag so move the page.
 		DMemoryObject* memory = aOldPageInfo->Owner();
 		// Page must be managed if it is pinned or dirty.
 		__NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged);
 		__NK_ASSERT_DEBUG(memory);
 		MmuLock::Unlock();
 		TPhysAddr newAddr;
-		return memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest);
+		TRACE2(("DPager::DiscardPage delegating pinned/dirty page to manager"));
+		TInt r = memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest);
+		TRACE(("< DPager::DiscardPage %d", r));
+		return r;
 		}
 
-	if (!iNumberOfFreePages)
+	TInt r = KErrNone;
+	SPageInfo* newPageInfo = NULL;
+	TBool havePageCleaningLock = EFalse;
+
+	TBool needNewPage;
+	TBool needPageCleaningLock;
+	while(needNewPage = (iNumberOfFreePages == 0 && newPageInfo == NULL),
+		  needPageCleaningLock = (aOldPageInfo->IsDirty() && !havePageCleaningLock),
+		  needNewPage || needPageCleaningLock)
 		{
-		// Allocate a new page for the live list as it has reached its minimum size.
 		MmuLock::Unlock();
-		SPageInfo* newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe),
-													aBlockZoneId, aBlockRest);
-		 if (!newPageInfo)
-			return KErrNoMemory;
+
+		if (needNewPage)
+			{
+			// Allocate a new page for the live list as it has reached its minimum size.
+			TUint flags = EMemAttNormalCached | Mmu::EAllocNoWipe;
+			newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)flags, aBlockZoneId, aBlockRest);
+			if (!newPageInfo)
+				{
+				TRACE(("< DPager::DiscardPage KErrNoMemory"));
+				r = KErrNoMemory;
+				MmuLock::Lock();
+				break;
+				}
+			}
+
+		if (needPageCleaningLock)
+			{
+			// Acquire the page cleaning mutex so StealPage can clean it
+			PageCleaningLock::Lock();
+			havePageCleaningLock = ETrue;
+			}
 
 		// Re-acquire the mmulock and re-check that the page is not pinned or dirty.
 		MmuLock::Lock();
-		if (aOldPageInfo->Type() != SPageInfo::EUnused && 
-			(aOldPageInfo->PagedState() == SPageInfo::EPagedPinned ||
-			(aBlockRest && aOldPageInfo->IsDirty())))
-			{// Page is now pinned or dirty so give up as it is inuse.
-			ReturnPageToSystem(*newPageInfo);
-			MmuLock::Unlock();
-			return KErrInUse;
-			}
-
-		// Attempt to steal the page
-		r = StealPage(aOldPageInfo);
-		__NK_ASSERT_DEBUG(MmuLock::IsHeld());
-
-		if (r == KErrCompletion)
-			{// This was a page table that has been freed but added to the 
-			// live list as a free page.  Remove from live list and continue.
-			__NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
-			RemovePage(aOldPageInfo);
-			r = KErrNone;
-			}
-
-		if (r == KErrNone)
-			{// Add the new page to the live list as discarding the old page 
-			// will reduce the live list below the minimum.
-			AddAsFreePage(newPageInfo);
-			// We've successfully discarded the page so return it to the free pool.
-			ReturnPageToSystem(*aOldPageInfo);
-			BalanceAges();
-			}
-		 else
+		if (!DiscardCanStealPage(aOldPageInfo, aBlockRest))
 			{
-			// New page not required so just return it to the system.  This is safe as 
-			// iNumberOfFreePages will have this page counted but as it is not on the live list
-			// noone else can touch it.
-			ReturnPageToSystem(*newPageInfo);
+			// Page is now pinned or dirty so give up as it is in use.
+			r = KErrInUse;
+			break;
 			}
 		}
-	else
+
+	if (r == KErrNone)
 		{
 		// Attempt to steal the page
-		r = StealPage(aOldPageInfo);
-
-		__NK_ASSERT_DEBUG(MmuLock::IsHeld());
-
-		if (r == KErrCompletion)
-			{// This was a page table that has been freed but added to the 
-			// live list as a free page.  Remove from live list.
-			__NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
-			RemovePage(aOldPageInfo);
-			r = KErrNone;
+		r = StealPage(aOldPageInfo);  // temporarily releases MmuLock if page is dirty
+		}
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+
+	if (r == KErrCompletion)
+		{// This was a page table that has been freed but added to the 
+		// live list as a free page.  Remove from live list and continue.
+		__NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
+		RemovePage(aOldPageInfo);
+		r = KErrNone;
+		}
+
+	if (r == KErrNone && iNumberOfFreePages == 0)
+		{
+		if (newPageInfo)
+			{
+			// Add a new page to the live list if we have one as discarding the old page will reduce
+			// the live list below the minimum.
+			AddAsFreePage(newPageInfo);
+			newPageInfo = NULL;
 			}
-
-		if (r == KErrNone)
-			{// We've successfully discarded the page so return it to the free pool.
-			ReturnPageToSystem(*aOldPageInfo);
-			BalanceAges();
+		else
+			{
+			// Otherwise the live list shrank when page was being cleaned so have to give up
+			// Otherwise the live list shrank while the page was being cleaned, so we have to give up
+			BalanceAges();                  // temporarily releases MmuLock
+			r = KErrInUse;
 			}
 		}
+
+	if (r == KErrNone)
+		{
+		// We've successfully discarded the page and ensured the live list is large enough, so
+		// return it to the free pool.
+		ReturnPageToSystem(*aOldPageInfo);  // temporarily releases MmuLock
+		BalanceAges();                      // temporarily releases MmuLock
+		}
+
+	if (newPageInfo)
+		{
+		// New page not required so just return it to the system.  This is safe as
+		// iNumberOfFreePages will have this page counted but as it is not on the live list no one
+		// else can touch it.
+		if (iNumberOfFreePages == 0)
+			AddAsFreePage(newPageInfo);
+		else
+			ReturnPageToSystem(*newPageInfo);   // temporarily releases MmuLock
+		}
+
+	if (havePageCleaningLock)
+		{
+		// Release the page cleaning mutex
+		MmuLock::Unlock();
+		PageCleaningLock::Unlock();
+		MmuLock::Lock();
+		}	
+	
 	MmuLock::Unlock();
+	TRACE(("< DPager::DiscardPage returns %d", r));
 	return r;	
 	}
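
One construct in DiscardPage() worth calling out is the loop condition that uses the comma
operator: needNewPage and needPageCleaningLock are both recomputed on every pass, and the body runs
until neither prerequisite is outstanding.  A standalone illustration of the idiom (ordinary C++,
unrelated to the pager state):

    #include <cstdio>

    static int resources = 0;

    static bool NeedA() { return resources < 1; }   // stand-in for "need a new free page"
    static bool NeedB() { return resources < 2; }   // stand-in for "need the page cleaning lock"

    int main()
        {
        bool needA, needB;
        // Both flags are re-evaluated each pass; the loop exits only when neither is set.
        while (needA = NeedA(), needB = NeedB(), needA || needB)
            {
            printf("pass: needA=%d needB=%d\n", needA, needB);
            ++resources;                            // satisfy one prerequisite per pass
            }
        return 0;
        }

In DiscardPage() the re-evaluation matters because the MmuLock is dropped inside the loop, so the
old page can become dirty, or the free page count can change, between passes.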
 
@@ -793,6 +1019,9 @@
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 
+	// should be unpaged at this point, otherwise Mmu::FreeRam will just give it back to us
+	__NK_ASSERT_DEBUG(aPageInfo.PagedState() == SPageInfo::EUnpaged);
+
 	__NK_ASSERT_DEBUG(iNumberOfFreePages>0);
 	--iNumberOfFreePages;
 
@@ -810,28 +1039,22 @@
 
 SPageInfo* DPager::PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags)
 	{
+	TBool pageCleaningLockHeld = EFalse;
 	SPageInfo* pageInfo;
 	TPhysAddr pagePhys;
-
+	TInt r = KErrGeneral;
+	
 	RamAllocLock::Lock();
 	MmuLock::Lock();
 
+find_a_page:
 	// try getting a free page from our live list...
-#ifdef _USE_OLDEST_LISTS
 	if (iOldestCleanCount)
 		{
 		pageInfo = SPageInfo::FromLink(iOldestCleanList.Last());
 		if(pageInfo->Type()==SPageInfo::EUnused)
-			goto get_oldest;
+			goto try_steal_oldest_page;
 		}
-#else
-	if(iOldCount)
-		{
-		pageInfo = SPageInfo::FromLink(iOldList.Last());
-		if(pageInfo->Type()==SPageInfo::EUnused)
-			goto get_oldest;
-		}
-#endif
 
 	// try getting a free page from the system pool...
 	if(!HaveMaximumPages())
@@ -843,14 +1066,62 @@
 		MmuLock::Lock();
 		}
 
+	// try stealing a clean page...
+	if (iOldestCleanCount)
+		goto try_steal_oldest_page;
+
+	// see if we can clean multiple dirty pages in one go...
+	if (KMaxPagesToClean > 1 && iOldestDirtyCount > 1)
+		{
+		// if we don't hold the page cleaning mutex then temporarily release ram alloc mutex and
+		// acquire page cleaning mutex; if we hold it already just proceed
+		if (!pageCleaningLockHeld)
+			{
+			MmuLock::Unlock();
+			RamAllocLock::Unlock();
+			PageCleaningLock::Lock();			
+			MmuLock::Lock();
+			}
+		
+		// there may be clean pages now if we've waited on the page cleaning mutex; if so, don't
+		// bother cleaning but just restart
+		if (iOldestCleanCount == 0)
+			CleanSomePages(EFalse);
+		
+		if (!pageCleaningLockHeld)
+			{
+			MmuLock::Unlock();
+			PageCleaningLock::Unlock();			
+			RamAllocLock::Lock();
+			MmuLock::Lock();
+			}
+		
+		if (iOldestCleanCount > 0)
+			goto find_a_page;
+		}
+
 	// as a last resort, steal a page from the live list...
-get_oldest:
-#ifdef _USE_OLDEST_LISTS
+	
+try_steal_oldest_page:
 	__NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount);
-#else
-	__NK_ASSERT_ALWAYS(iOldCount|iYoungCount);
-#endif
-	pageInfo = StealOldestPage();
+	r = TryStealOldestPage(pageInfo);
+	// if this fails we restart the whole process
+	if (r < KErrNone)
+		goto find_a_page;
+
+	// if we need to clean, acquire the page cleaning mutex for the life of this function
+	if (r == 1)
+		{
+		__NK_ASSERT_ALWAYS(!pageCleaningLockHeld);
+		MmuLock::Unlock();
+		PageCleaningLock::Lock();
+		MmuLock::Lock();
+		pageCleaningLockHeld = ETrue;
+		goto find_a_page;		
+		}
+
+	// otherwise we're done!
+	__NK_ASSERT_DEBUG(r == KErrNone);
 	MmuLock::Unlock();
 
 	// make page state same as a freshly allocated page...
@@ -858,7 +1129,10 @@
 	TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags);
 
 done:
+	if (pageCleaningLockHeld)
+		PageCleaningLock::Unlock();
 	RamAllocLock::Unlock();
+
 	return pageInfo;
 	}
 
@@ -915,10 +1189,8 @@
 
 		case SPageInfo::EPagedYoung:
 		case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
 		case SPageInfo::EPagedOldestDirty:
 		case SPageInfo::EPagedOldestClean:
-#endif
 			continue; // discard already been allowed
 
 		case SPageInfo::EPagedPinned:
@@ -977,10 +1249,8 @@
 
 		case SPageInfo::EPagedYoung:
 		case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
 		case SPageInfo::EPagedOldestClean:
 		case SPageInfo::EPagedOldestDirty:
-#endif
 			changeType = ETrue;
 			break; // remove from live list
 
@@ -1046,6 +1316,7 @@
 	TheCodePagedMemoryManager->Init3();
 	TInt r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
 	__NK_ASSERT_ALWAYS(r==KErrNone);
+	PageCleaningLock::Init();
 	}
 
 
@@ -1060,12 +1331,8 @@
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 	TBool restrictPage = EFalse;
 	SPageInfo* pageInfo = NULL;
-#ifdef _USE_OLDEST_LISTS
 	TUint oldestCount = iOldestCleanCount + iOldestDirtyCount;
 	if((iOldCount + oldestCount) * iYoungOldRatio < iYoungCount)
-#else
-	if (iOldCount * iYoungOldRatio < iYoungCount)
-#endif
 		{
 		// Need more old pages so make one young page into an old page...
 		__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
@@ -1084,9 +1351,9 @@
 		restrictPage = ETrue;
 		}
 
-#ifdef _USE_OLDEST_LISTS
 	// Check we have enough oldest pages.
-	if (oldestCount * iOldOldestRatio < iOldCount)
+	if (oldestCount < KMaxOldestPages &&
+		oldestCount * iOldOldestRatio < iOldCount)
 		{
 		__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
 		__NK_ASSERT_DEBUG(iOldCount);
@@ -1099,6 +1366,7 @@
 			oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestDirty);
 			iOldestDirtyList.AddHead(link);
 			++iOldestDirtyCount;
+			PageCleaner::NotifyPagesToClean();
 			Event(EEventPageAgedDirty,oldestPageInfo);
 			}
 		else
@@ -1109,7 +1377,7 @@
 			Event(EEventPageAgedClean,oldestPageInfo);
 			}
 		}
-#endif
+
 	if (restrictPage)
 		{
 		// Make the recently aged old page inaccessible.  This is done last as it 
@@ -1144,10 +1412,8 @@
 		{
 	case SPageInfo::EPagedYoung:
 	case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
 	case SPageInfo::EPagedOldestClean:
 	case SPageInfo::EPagedOldestDirty:
-#endif
 		RemovePage(pi);
 		AddAsYoungestPage(pi);
 		BalanceAges();
@@ -1167,6 +1433,7 @@
 		}
 	}
 
+
 TInt DPager::PteAndInfoFromLinAddr(	TInt aOsAsid, TLinAddr aAddress, DMemoryMappingBase* aMapping, 
 									TUint aMapInstanceCount, TPte*& aPte, SPageInfo*& aPageInfo)
 	{
@@ -1192,11 +1459,13 @@
 	return KErrNone;
 	}
 
+
 TInt DPager::TryRejuvenate(	TInt aOsAsid, TLinAddr aAddress, TUint aAccessPermissions, TLinAddr aPc,
 							DMemoryMappingBase* aMapping, TUint aMapInstanceCount, DThread* aThread, 
 							TAny* aExceptionInfo)
 	{
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	START_PAGING_BENCHMARK;
 
 	SPageInfo* pi;
 	TPte* pPte;
@@ -1292,12 +1561,8 @@
 	Event(EEventPageRejuvenate,pi,aPc,aAddress,aAccessPermissions);
 
 	TBool balance = false;
-#ifdef _USE_OLDEST_LISTS
 	if(	state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld || 
 		state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
-#else
-	if(state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld)
-#endif
 		{
 		RemovePage(pi);
 		AddAsYoungestPage(pi);
@@ -1318,6 +1583,7 @@
 	if(balance)
 		BalanceAges();
 
+	END_PAGING_BENCHMARK(EPagingBmRejuvenate);
 	return KErrNone;
 	}
 
@@ -1349,10 +1615,8 @@
 			{
 		case SPageInfo::EPagedYoung:
 		case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
 		case SPageInfo::EPagedOldestClean:
 		case SPageInfo::EPagedOldestDirty:
-#endif
 			RemovePage(pi);
 			// fall through...
 		case SPageInfo::EUnpaged:
@@ -1386,10 +1650,8 @@
 		{
 	case SPageInfo::EPagedYoung:
 	case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
 	case SPageInfo::EPagedOldestClean:
 	case SPageInfo::EPagedOldestDirty:
-#endif
 		RemovePage(aPageInfo);
 		AddAsYoungestPage(aPageInfo);
 		BalanceAges();
@@ -1446,7 +1708,6 @@
 		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
 		break;
 
-#ifdef _USE_OLDEST_LISTS
 	case SPageInfo::EPagedOldestClean:
 		__NK_ASSERT_DEBUG(iOldestCleanCount);
 		aPageInfo->iLink.Deque();
@@ -1460,7 +1721,6 @@
 		--iOldestDirtyCount;
 		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
 		break;
-#endif
 
 	case SPageInfo::EPagedPinned:
 		// nothing more to do...
@@ -1749,7 +2009,7 @@
 
 	MmuLock::Lock();
 
-	__NK_ASSERT_ALWAYS(iYoungOldRatio!=0);
+	__NK_ASSERT_ALWAYS(iYoungOldRatio);
 
 	// Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
 	iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
@@ -1830,10 +2090,12 @@
 	}
 
 
+// WARNING THIS METHOD MAY HOLD THE RAM ALLOC LOCK FOR EXCESSIVE PERIODS.  DON'T USE THIS IN ANY PRODUCTION CODE.
 void DPager::FlushAll()
 	{
 	NKern::ThreadEnterCS();
 	RamAllocLock::Lock();
+	PageCleaningLock::Lock();
 
 	TRACE(("DPager::FlushAll() live list young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
 
@@ -1856,12 +2118,8 @@
 			do
 				{
 				SPageInfo::TPagedState state = pi->PagedState();
-#ifdef _USE_OLDEST_LISTS
 				if (state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
 					state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
-#else
-				if(state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld)
-#endif
 					{
 					if (pi->Type() != SPageInfo::EUnused)
 						{
@@ -1874,10 +2132,7 @@
 				++pi;
 				if(((TUint)pi&(0xf<<KPageInfoShift))==0)
 					{
-					MmuLock::Unlock(); // every 16 page infos
-					RamAllocLock::Unlock();
-					RamAllocLock::Lock();
-					MmuLock::Lock();
+					MmuLock::Flash(); // every 16 page infos
 					}
 				}
 			while(pi<piEnd);
@@ -1892,6 +2147,7 @@
 
 	TRACE(("DPager::FlushAll() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
 
+	PageCleaningLock::Unlock();
 	RamAllocLock::Unlock();
 	NKern::ThreadLeaveCS();
 	}
@@ -2066,9 +2322,8 @@
 		TUint index = (TInt) a1;
 		if (index >= EMaxPagingBm)
 			return KErrNotFound;
-		NKern::LockSystem();
-		SPagingBenchmarkInfo info = ThePager.iBenchmarkInfo[index];
-		NKern::UnlockSystem();
+		SPagingBenchmarkInfo info;
+		ThePager.ReadBenchmarkData((TPagingBenchmark)index, info);
 		kumemput32(a2,&info,sizeof(info));
 		}		
 		return KErrNone;
@@ -2078,9 +2333,7 @@
 		TUint index = (TInt) a1;
 		if (index >= EMaxPagingBm)
 			return KErrNotFound;
-		NKern::LockSystem();
 		ThePager.ResetBenchmarkData((TPagingBenchmark)index);
-		NKern::UnlockSystem();
 		}
 		return KErrNone;
 #endif
@@ -2096,28 +2349,39 @@
 void DPager::ResetBenchmarkData(TPagingBenchmark aBm)
     {
     SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
+	__SPIN_LOCK_IRQ(iBenchmarkLock);
     info.iCount = 0;
     info.iTotalTime = 0;
     info.iMaxTime = 0;
     info.iMinTime = KMaxTInt;
+	__SPIN_UNLOCK_IRQ(iBenchmarkLock);
     }
  
-void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime)
+void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime, TUint aCount)
     {
     SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
-    ++info.iCount;
 #if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP)
     TInt64 elapsed = aEndTime - aStartTime;
 #else
     TInt64 elapsed = aStartTime - aEndTime;
 #endif
+	__SPIN_LOCK_IRQ(iBenchmarkLock);
+    info.iCount +=  aCount;
     info.iTotalTime += elapsed;
     if (elapsed > info.iMaxTime)
         info.iMaxTime = elapsed;
     if (elapsed < info.iMinTime)
         info.iMinTime = elapsed;
+	__SPIN_UNLOCK_IRQ(iBenchmarkLock);
     }
 
+void DPager::ReadBenchmarkData(TPagingBenchmark aBm, SPagingBenchmarkInfo& aDataOut)
+	{
+	__SPIN_LOCK_IRQ(iBenchmarkLock);
+	aDataOut = iBenchmarkInfo[aBm];
+	__SPIN_UNLOCK_IRQ(iBenchmarkLock);
+	}
+
 #endif //__DEMAND_PAGING_BENCHMARKS__
 
 
@@ -2129,62 +2393,86 @@
 // DPagingRequest
 //
 
-DPagingRequest::DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup)
-	: iPoolGroup(aPoolGroup), iUseRegionMemory(0), iUseRegionIndex(0), iUseRegionCount(0)
+DPagingRequest::DPagingRequest()
+	: iMutex(NULL), iUseRegionCount(0)
 	{
 	}
 
 
-FORCE_INLINE void DPagingRequest::SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+void DPagingRequest::SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
 	{
 	__ASSERT_SYSTEM_LOCK;
-	iUseRegionMemory = aMemory;
-	iUseRegionIndex = aIndex;
+	__NK_ASSERT_DEBUG(iUseRegionCount == 0);
+	__NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages);
+	for (TUint i = 0 ; i < aCount ; ++i)
+		{
+		iUseRegionMemory[i] = aMemory;
+		iUseRegionIndex[i] = aIndex + i;		
+		}
+	iUseRegionCount = aCount;
+	}
+
+
+void DPagingRequest::SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
+	{
+	__ASSERT_SYSTEM_LOCK;
+	__NK_ASSERT_DEBUG(iUseRegionCount == 0);
+	__NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages);
+	for (TUint i = 0 ; i < aCount ; ++i)
+		{
+		iUseRegionMemory[i] = aMemory[i];
+		iUseRegionIndex[i] = aIndex[i];
+		}
 	iUseRegionCount = aCount;
 	}
 
 
-TBool DPagingRequest::CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
-	{
-	return aMemory==iUseRegionMemory
-		&& TUint(aIndex-iUseRegionIndex) < iUseRegionCount
-		&& TUint(iUseRegionCount-TUint(aIndex-iUseRegionIndex)) <= aCount;
-	}
-
-
-void DPagingRequest::Release()
-	{
-	NKern::LockSystem();
-	SetUse(0,0,0);
-	Signal();
-	}
-
-
-void DPagingRequest::Wait()
+void DPagingRequest::ResetUse()
 	{
 	__ASSERT_SYSTEM_LOCK;
-	++iUsageCount;
-	TInt r = iMutex->Wait();
-	__NK_ASSERT_ALWAYS(r == KErrNone);
+	__NK_ASSERT_DEBUG(iUseRegionCount > 0);
+	iUseRegionCount = 0;
 	}
 
 
-void DPagingRequest::Signal()
+TBool DPagingRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
 	{
-	__ASSERT_SYSTEM_LOCK;
-	iPoolGroup.Signal(this);
+	if (iUseRegionCount != aCount)
+		return EFalse;
+	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
+		{
+		if (iUseRegionMemory[i] != aMemory || iUseRegionIndex[i] != aIndex + i)
+			return EFalse;
+		}
+	return ETrue;
 	}
 
 
-FORCE_INLINE TBool DPagingRequest::IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+TBool DPagingRequest::CheckUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
 	{
+	if (iUseRegionCount != aCount)
+		return EFalse;
+	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
+		{
+		if (iUseRegionMemory[i] != aMemory[i] || iUseRegionIndex[i] != aIndex[i])
+			return EFalse;
+		}
+	return ETrue;
+	}
+
+
+TBool DPagingRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+	{
+	// note this could be optimised as most of the time we will be checking read/read collisions,
+	// both of which will be contiguous
 	__ASSERT_SYSTEM_LOCK;
-	DMemoryObject* memory = iUseRegionMemory;
-	TUint index = iUseRegionIndex;
-	TUint count = iUseRegionCount;
-	// note, this comparison would fail if either region includes page number KMaxTUint,
-	// but it isn't possible to create a memory object which is > KMaxTUint pages...
-	return (memory == aMemory) && ((index + count) > aIndex) && (index < (aIndex + aCount));
+	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
+		{
+		if (iUseRegionMemory[i] == aMemory &&
+			TUint(iUseRegionIndex[i] - aIndex) < aCount)
+			return ETrue;
+		}
+	return EFalse;
 	}
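
The membership test above uses the standard unsigned wrap-around trick: an index lies inside
[aIndex, aIndex + aCount) exactly when TUint(index - aIndex) < aCount, because any index below
aIndex wraps to a very large value.  A self-contained illustration:

    #include <cstdio>

    typedef unsigned int TUint;

    static bool InRange(TUint aPageIndex, TUint aStart, TUint aCount)
        {
        return TUint(aPageIndex - aStart) < aCount;  // one compare covers both bounds
        }

    int main()
        {
        printf("%d\n", InRange(5, 4, 3));   // 1: 5 lies in [4, 7)
        printf("%d\n", InRange(7, 4, 3));   // 0: 7 is one past the end
        printf("%d\n", InRange(3, 4, 3));   // 0: 3 - 4 wraps to 0xFFFFFFFF
        return 0;
        }

The removed IsCollision() relied on the same property applied to whole regions; the per-page loop
here is what allows use regions to be discontiguous.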
 
 
@@ -2201,6 +2489,38 @@
 	iTempMapping.Unmap(aIMBRequired);
 	}
 
+//
+// DPoolPagingRequest
+//
+
+DPoolPagingRequest::DPoolPagingRequest(DPagingRequestPool::TGroup& aPoolGroup) :
+	iPoolGroup(aPoolGroup)
+	{
+	}
+
+
+void DPoolPagingRequest::Release()
+	{
+	NKern::LockSystem();
+	ResetUse();
+	Signal();
+	}
+
+
+void DPoolPagingRequest::Wait()
+	{
+	__ASSERT_SYSTEM_LOCK;
+	++iUsageCount;
+	TInt r = iMutex->Wait();
+	__NK_ASSERT_ALWAYS(r == KErrNone);
+	}
+
+
+void DPoolPagingRequest::Signal()
+	{
+	__ASSERT_SYSTEM_LOCK;
+	iPoolGroup.Signal(this);
+	}
 
 //
 // DPageReadRequest
@@ -2208,6 +2528,13 @@
 
 TInt DPageReadRequest::iAllocNext = 0;
 
+DPageReadRequest::DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup) :
+	DPoolPagingRequest(aPoolGroup)
+	{
+	// allocate space for mapping pages whilst they're being loaded...
+	iTempMapping.Alloc(EMaxPages);
+	}
+
 TInt DPageReadRequest::Construct()
 	{
 	// allocate id and mutex...
@@ -2219,9 +2546,6 @@
 	if(r!=KErrNone)
 		return r;
 
-	// allocate space for mapping pages whilst they're being loaded...
-	iTempMapping.Alloc(EMaxPages);
-
 	// create memory buffer...
 	TUint bufferSize = EMaxPages+1;
 	DMemoryObject* bufferMemory;
@@ -2248,23 +2572,20 @@
 // DPageWriteRequest
 //
 
-TInt DPageWriteRequest::iAllocNext = 0;
-
-TInt DPageWriteRequest::Construct()
+
+DPageWriteRequest::DPageWriteRequest()
 	{
-	// allocate id and mutex...
-	TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
-	_LIT(KLitPagingRequest,"PageWriteRequest-");
-	TBuf<sizeof("PageWriteRequest-")+10> mutexName(KLitPagingRequest);
-	mutexName.AppendNum(id);
-	TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageOut);
-	if(r!=KErrNone)
-		return r;
-
+	iMutex = ThePageCleaningLock;
 	// allocate space for mapping pages whilst they're being loaded...
-	iTempMapping.Alloc(EMaxPages);
-
-	return r;
+	iTempMapping.Alloc(KMaxPagesToClean);
+	}
+
+
+void DPageWriteRequest::Release()
+	{
+	NKern::LockSystem();
+	ResetUse();
+	NKern::UnlockSystem();
 	}
 
 
@@ -2272,11 +2593,10 @@
 // DPagingRequestPool
 //
 
-DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest)
-	: iPageReadRequests(aNumPageReadRequest), iPageWriteRequests(aNumPageWriteRequest)
+DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest, TBool aWriteRequest)
+	: iPageReadRequests(aNumPageReadRequest)
 	{
 	TUint i;
-
 	for(i=0; i<aNumPageReadRequest; ++i)
 		{
 		DPageReadRequest* req = new DPageReadRequest(iPageReadRequests);
@@ -2287,14 +2607,10 @@
 		iPageReadRequests.iFreeList.Add(req);
 		}
 
-	for(i=0; i<aNumPageWriteRequest; ++i)
+	if (aWriteRequest)
 		{
-		DPageWriteRequest* req = new DPageWriteRequest(iPageWriteRequests);
-		__NK_ASSERT_ALWAYS(req);
-		TInt r = req->Construct();
-		__NK_ASSERT_ALWAYS(r==KErrNone);
-		iPageWriteRequests.iRequests[i] = req;
-		iPageWriteRequests.iFreeList.Add(req);
+		iPageWriteRequest = new DPageWriteRequest();
+		__NK_ASSERT_ALWAYS(iPageWriteRequest);
 		}
 	}
 
@@ -2309,24 +2625,23 @@
 	{
 	NKern::LockSystem();
 
-	DPagingRequest* req;
-
-	// if we collide with page write operation...
-	req = iPageWriteRequests.FindCollision(aMemory,aIndex,aCount);
-	if(req)
+	DPoolPagingRequest* req;
+	
+	// check for collision with existing write
+	if(iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount))
 		{
-		// wait until write completes...
-		req->Wait();
-		req->Signal();
+		NKern::UnlockSystem();
+		PageCleaningLock::Lock();
+		PageCleaningLock::Unlock();
 		return 0; // caller expected to retry if needed
 		}
 
 	// get a request object to use...
 	req = iPageReadRequests.GetRequest(aMemory,aIndex,aCount);
 
-	// check no new requests collide with us...
-	if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount)
-		|| iPageReadRequests.FindCollision(aMemory,aIndex,aCount))
+	// check no new read or write requests collide with us...
+	if ((iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount)) ||
+		iPageReadRequests.FindCollisionContiguous(aMemory,aIndex,aCount))
 		{
 		// another operation is colliding with this region, give up and retry...
 		req->Signal();
@@ -2334,61 +2649,57 @@
 		}
 
 	// we have a request object which we can use...
-	req->SetUse(aMemory,aIndex,aCount);
+	req->SetUseContiguous(aMemory,aIndex,aCount);
 
 	NKern::UnlockSystem();
 	return (DPageReadRequest*)req;
 	}
 
 
-DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
 	{
+	__NK_ASSERT_DEBUG(iPageWriteRequest);
+	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
+
 	NKern::LockSystem();
 
-	DPagingRequest* req;
-
-	for(;;)
-		{
-		// get a request object to use...
-		req = iPageWriteRequests.GetRequest(aMemory,aIndex,aCount);
-
-		if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount))
-			{
-			// another write operation is colliding with this region, give up and retry...
-			req->Signal();
-			// Reacquire the system lock as Signal() above will release it.
-			NKern::LockSystem();
-			continue;
-			}
-
-		break;
-		}
-
-	// we have a request object which we can use...
-	req->SetUse(aMemory,aIndex,aCount);
-
+	// Collision with existing read requests is not possible here.  For a page to be read it must
+	// not be present, and for it to be written it must be present and dirty.  There is no way for a
+	// page to go between these states without an intervening read on an uninitialised (freshly
+	// committed) page, which will wait on the first read request.  In other words something like
+	// this:
+	//
+	//   read (blocks), decommit, re-commit, read (waits on mutex), write (now no pending reads!)
+	//
+	// Note that a read request can be outstanding and appear to collide with this write, but only
+	// in the case when the thread making the read has blocked just after acquiring the request but
+	// before it checks whether the read is still necessary.  This makes it difficult to assert
+	// that no collisions take place.
+	
+	iPageWriteRequest->SetUseDiscontiguous(aMemory,aIndex,aCount);
 	NKern::UnlockSystem();
-	return (DPageWriteRequest*)req;
+	
+	return iPageWriteRequest;
 	}
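
For context, a sketch of the shape a caller of AcquirePageWriteRequest() now takes (the real
callers are in the data-paged memory manager, not in this file; pool, memoryObjects, pageIndices
and pageCount are illustrative names):

    // Illustrative only: the single write request is taken under the page cleaning lock,
    // which AcquirePageWriteRequest() asserts is already held.
    PageCleaningLock::Lock();
    DPageWriteRequest* req =
        pool->AcquirePageWriteRequest(memoryObjects, pageIndices, pageCount);
    // ... map the pages through req's temporary mapping and hand them to the paging device ...
    req->Release();                 // clears the use region under the system lock
    PageCleaningLock::Unlock();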
 
 
 DPagingRequestPool::TGroup::TGroup(TUint aNumRequests)
 	{
 	iNumRequests = aNumRequests;
-	iRequests = new DPagingRequest*[aNumRequests];
+	iRequests = new DPoolPagingRequest*[aNumRequests];
 	__NK_ASSERT_ALWAYS(iRequests);
 	}
 
 
-DPagingRequest* DPagingRequestPool::TGroup::FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+DPoolPagingRequest* DPagingRequestPool::TGroup::FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
 	{
 	__ASSERT_SYSTEM_LOCK;
-	DPagingRequest** ptr = iRequests;
-	DPagingRequest** ptrEnd = ptr+iNumRequests;
+	DPoolPagingRequest** ptr = iRequests;
+	DPoolPagingRequest** ptrEnd = ptr+iNumRequests;
 	while(ptr<ptrEnd)
 		{
-		DPagingRequest* req = *ptr++;
-		if(req->IsCollision(aMemory,aIndex,aCount))
+		DPoolPagingRequest* req = *ptr++;
+		if(req->IsCollisionContiguous(aMemory,aIndex,aCount))
 			return req;
 		}
 	return 0;
@@ -2397,16 +2708,16 @@
 
 static TUint32 RandomSeed = 33333;
 
-DPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+DPoolPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
 	{
 	__NK_ASSERT_DEBUG(iNumRequests > 0);
 
 	// try using an existing request which collides with this region...
-	DPagingRequest* req  = FindCollision(aMemory,aIndex,aCount);
+	DPoolPagingRequest* req  = FindCollisionContiguous(aMemory,aIndex,aCount);
 	if(!req)
 		{
 		// use a free request...
-		req = (DPagingRequest*)iFreeList.GetFirst();
+		req = (DPoolPagingRequest*)iFreeList.GetFirst();
 		if(req)
 			{
 			// free requests aren't being used...
@@ -2429,7 +2740,7 @@
 	}
 
 
-void DPagingRequestPool::TGroup::Signal(DPagingRequest* aRequest)
+void DPagingRequestPool::TGroup::Signal(DPoolPagingRequest* aRequest)
 	{
 	// if there are no threads waiting on the mutex then return it to the free pool...
 	__NK_ASSERT_DEBUG(aRequest->iUsageCount > 0);
@@ -2457,8 +2768,8 @@
 	TInt r = KErrNotSupported;	// Will return this if unsupported device type is installed
 
 	// create the pools of page out and page in requests...
-	const TInt writeReqs = (aDevice->iType & DPagingDevice::EData) ? KPagingRequestsPerDevice : 0;
-	aDevice->iRequestPool = new DPagingRequestPool(KPagingRequestsPerDevice,writeReqs);
+	const TBool writeReq = (aDevice->iType & DPagingDevice::EData) != 0;
+	aDevice->iRequestPool = new DPagingRequestPool(KPagingRequestsPerDevice, writeReq);
 	if(!aDevice->iRequestPool)
 		{
 		r = KErrNoMemory;
@@ -2488,6 +2799,9 @@
 
  	if (K::MemModelAttributes & (EMemModelAttrRomPaging | EMemModelAttrCodePaging | EMemModelAttrDataPaging))
 		TheThrashMonitor.Start();
+	
+ 	if (K::MemModelAttributes & EMemModelAttrDataPaging)
+		PageCleaner::Start();
 
 exit:
 	TRACEB(("Kern::InstallPagingDevice returns %d",r));
@@ -2637,3 +2951,32 @@
 	}
 
 
+
+//
+// PageCleaningLock
+//
+
+_LIT(KLitPageCleaningLock,"PageCleaningLock");
+
+void PageCleaningLock::Init()
+	{
+	__NK_ASSERT_DEBUG(!ThePageCleaningLock);
+	TInt r = Kern::MutexCreate(ThePageCleaningLock, KLitPageCleaningLock, KMutexOrdPageOut);
+	__NK_ASSERT_ALWAYS(r == KErrNone);
+	}
+
+void PageCleaningLock::Lock()
+	{
+	Kern::MutexWait(*ThePageCleaningLock);
+	}
+
+
+void PageCleaningLock::Unlock()
+	{
+	Kern::MutexSignal(*ThePageCleaningLock);
+	}
+
+TBool PageCleaningLock::IsHeld()
+	{
+	return ThePageCleaningLock->iCleanup.iThread == &Kern::CurrentThread();
+	}