kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp
changeset 121 661475905584
parent 90 947f0dc9f7a8
child 132 e4a7b1cbe40c
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp	Fri Apr 23 22:02:01 2010 +0100
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp	Fri Apr 23 22:08:41 2010 +0100
@@ -92,7 +92,7 @@
 
 #elif defined(__CPU_X86)
 
-/*	Need at least 6 mapped pages to guarantee to be able to execute all ARM instructions,
+/*	Need at least 6 mapped pages to guarantee to be able to execute all X86 instructions,
 	plus enough pages for 6 page tables to map those pages, plus enough pages for the
 	page table info structures of those page tables.
 	(Worst case is (?) a MOV [X],[Y] instruction with instruction, 'X' and 'Y' all
@@ -200,8 +200,7 @@
 		TInt r = m.AllocRam(&pagePhys, 1, 
 							(Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe|Mmu::EAllocNoPagerReclaim), 
 							EPageDiscard);
-		if(r!=KErrNone)
-			__NK_ASSERT_ALWAYS(0);
+		__NK_ASSERT_ALWAYS(r == KErrNone);
 		MmuLock::Lock();
 		AddAsFreePage(SPageInfo::FromPhysAddr(pagePhys));
 		MmuLock::Unlock();
@@ -214,50 +213,50 @@
 
 
 #ifdef _DEBUG
-TBool DPager::CheckLists()
+#ifdef FMM_PAGER_CHECK_LISTS
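+// Walk aCount links from aHead and check that the circular list contains exactly
+// aCount entries and links back round to aHead, returning EFalse if it does not.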
+TBool CheckList(SDblQueLink* aHead, TUint aCount)
 	{
-#if 0
-	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
-	SDblQueLink* head = &iOldList.iA;
-	TInt n = iOldCount;
-	SDblQueLink* link = head;
-	while(n--)
+	SDblQueLink* link = aHead;
+	while(aCount--)
 		{
 		link = link->iNext;
-		if(link==head)
-			return false;
+		if(link == aHead)
+			return EFalse;
 		}
 	link = link->iNext;
-	if(link!=head)
-		return false;
-
-	head = &iYoungList.iA;
-	n = iYoungCount;
-	link = head;
-	while(n--)
-		{
-		link = link->iNext;
-		if(link==head)
-			return false;
-		}
-	link = link->iNext;
-	if(link!=head)
-		return false;
-
-//	TRACEP(("DP: y=%d o=%d f=%d",iYoungCount,iOldCount,iNumberOfFreePages));
-#endif
-//	TraceCounts();
+	if(link != aHead)
+		return EFalse;
+	return ETrue;
+	}
+#endif // #ifdef FMM_PAGER_CHECK_LISTS
+
+TBool DPager::CheckLists()
+	{
+#ifdef FMM_PAGER_CHECK_LISTS
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	if (!CheckList(&iOldList.iA, iOldCount))
+		return EFalse;
+	if (!CheckList(&iYoungList.iA, iYoungCount))
+		return EFalse;
+	if (!CheckList(&iOldestCleanList.iA, iOldestCleanCount))
+		return EFalse;
+	if (!CheckList(&iOldestDirtyList.iA, iOldestDirtyCount))
+		return EFalse;
+	TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d", iYoungCount, iOldCount, 
+			iOldestCleanCount, iOldestDirtyCount, iNumberOfFreePages));
+	TraceCounts();
+#endif // #ifdef FMM_PAGER_CHECK_LISTS
 	return true;
 	}
 
 void DPager::TraceCounts()
 	{
-	TRACEP(("DP: y=%d o=%d f=%d min=%d max=%d ml=%d res=%d",
-		iYoungCount,iOldCount,iNumberOfFreePages,iMinimumPageCount,
-		iMaximumPageCount,iMinimumPageLimit,iReservePageCount));
+	TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d min=%d max=%d ml=%d res=%d",
+		iYoungCount, iOldCount, iOldestCleanCount, iOldestDirtyCount, 
+		iNumberOfFreePages, iMinimumPageCount, iMaximumPageCount,
+		iMinimumPageLimit, iReservePageCount));
 	}
-
-#endif
+#endif // #ifdef _DEBUG
 
 
 TBool DPager::HaveTooManyPages()
@@ -1292,6 +1291,10 @@
 	TInt r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
 	__NK_ASSERT_ALWAYS(r==KErrNone);
 	PageCleaningLock::Init();
+#ifdef __DEMAND_PAGING_BENCHMARKS__
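+	// zero the counters for every paging benchmark before the pager starts running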
+	for (TInt i = 0 ; i < EMaxPagingBm ; ++i)
+		ResetBenchmarkData((TPagingBenchmark)i);
+#endif
 	}
 
 
@@ -1984,7 +1987,7 @@
 
 	MmuLock::Lock();
 
-	__NK_ASSERT_ALWAYS(iYoungOldRatio!=0);
+	__NK_ASSERT_ALWAYS(iYoungOldRatio);
 
 	// Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
 	iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
@@ -1997,9 +2000,8 @@
 		aMaximumPageCount=aMinimumPageCount;
 
 	// Increase iMaximumPageCount?
-	TInt extra = aMaximumPageCount-iMaximumPageCount;
-	if(extra>0)
-		iMaximumPageCount += extra;
+	if(aMaximumPageCount > iMaximumPageCount)
+		iMaximumPageCount = aMaximumPageCount;
 
 	// Reduce iMinimumPageCount?
 	TInt spare = iMinimumPageCount-aMinimumPageCount;
@@ -2126,6 +2128,100 @@
 	}
 
 
+TInt DPager::FlushRegion(DMemModelProcess* aProcess, TLinAddr aStartAddress, TUint aSize)
+	{
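+	// Attempt to page out every live demand-paged page in the given region of
+	// aProcess's address space.  This is a best-effort operation: pages that cannot
+	// be stolen are simply left where they are.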
+	if (aSize == 0)
+		return KErrNone;
+
+	// find mapping
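+	// (the mapping instance count is recorded so that the mapping being unmapped or
+	// reused can be detected while the locks are dropped below)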
+	NKern::ThreadEnterCS();
+	TUint offsetInMapping;
+	TUint mapInstanceCount;
+	DMemoryMapping* mapping = MM::FindMappingInProcess(aProcess, aStartAddress, aSize,
+													   offsetInMapping, mapInstanceCount);
+	if (!mapping)
+		{
+		NKern::ThreadLeaveCS();
+		return KErrBadDescriptor;
+		}
+
+	// check whether memory is demand paged
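+	// (memory that is not demand paged has nothing to flush, so that case is treated as success)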
+	MmuLock::Lock();
+	DMemoryObject* memory = mapping->Memory();
+	if(mapInstanceCount != mapping->MapInstanceCount() || memory == NULL || !memory->IsDemandPaged())
+		{
+		MmuLock::Unlock();
+		mapping->Close();
+		NKern::ThreadLeaveCS();
+		return KErrNone;
+		}
+
+	TRACE(("DPager::FlushRegion: %O %08x +%d", aProcess, aStartAddress, aSize));
+	if (!K::Initialising)
+		TRACE2(("  context %T %d", NCurrentThread(), NKern::CurrentContext()));
+
+	// Flushing can block on several mutexes, so the current thread must not be a realtime thread.
+	__NK_ASSERT_DEBUG(!Kern::CurrentThread().IsRealtime());
+
+	// acquire necessary locks
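+	// (the MmuLock is dropped and re-acquired last so that the RamAllocLock and
+	// PageCleaningLock mutexes are taken ahead of it, respecting the lock ordering)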
+	MmuLock::Unlock();
+	RamAllocLock::Lock();
+	PageCleaningLock::Lock();
+	MmuLock::Lock();
+
+	// find region in memory object
+	TUint startPage = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
+	TUint sizeInPages = ((aStartAddress & KPageMask) + aSize - 1) >> KPageShift;
+	TUint endPage = startPage + sizeInPages;
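+	// (note that sizeInPages is the offset of the last page covered relative to
+	// startPage, so endPage is the inclusive index of the final page and the loop
+	// below iterates while index <= endPage)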
+	TRACE2(("DPager::FlushRegion: page range is %d to %d", startPage, endPage));
+	
+	// attempt to flush each page
+	TUint index = startPage;
+	while (mapping->MapInstanceCount() == mapInstanceCount &&
+		   mapping->Memory() && index <= endPage)
+		{
+		TRACE2(("DPager::FlushRegion: flushing page %d", index));
+		TPhysAddr physAddr = memory->iPages.PhysAddr(index);
+		
+		if (physAddr != KPhysAddrInvalid)
+			{
+			TRACE2(("DPager::FlushRegion: phys addr is %08x", physAddr));
+			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr);
+			if (pi)
+				{
+				__NK_ASSERT_DEBUG(pi->Type() == SPageInfo::EManaged);
+				SPageInfo::TPagedState state = pi->PagedState();
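+				// only pages currently on one of the live lists (young, old, oldest
+				// clean, oldest dirty) are candidates for stealing; pages in any other
+				// paged state are left untouched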
+				if (state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
+					state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
+					{
+					TRACE2(("DPager::FlushRegion: attempt to steal page"));
+					TInt r = StealPage(pi);
+					if(r==KErrNone)
+						{
+						TRACE2(("DPager::FlushRegion: attempt to page out %08x", physAddr));
+						AddAsFreePage(pi);
+						TRACE2(("DPager::FlushRegion: paged out %08x", physAddr));
+						}
+					else
+						TRACE2(("DPager::FlushRegion: page out %08x failed with %d", physAddr, r));
+					}
+				}
+			}
+		
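+		// flash the MmuLock (briefly release and re-acquire it) so that it is not held
+		// continuously while walking a potentially large region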
+		MmuLock::Flash();
+		++index;
+		}
+	
+	MmuLock::Unlock();
+	PageCleaningLock::Unlock();
+	RamAllocLock::Unlock();
+	mapping->Close();
+	NKern::ThreadLeaveCS();
+	TRACE2(("DPager::FlushRegion: done"));
+	return KErrNone;
+	}
+
+
 void DPager::GetLiveListInfo(SVMCacheInfo& aInfo)
 	{
 	MmuLock::Lock(); // ensure consistent set of values are read...
@@ -2837,15 +2933,15 @@
 EXPORT_C TInt DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
 	{
 //	TRACEP(("DDemandPagingLock[0x%08x]::Lock(0x%08x,0x%08x,0x%08x)",this,aThread,aStart,aSize));
-	if(iLockedPageCount)
-		__NK_ASSERT_ALWAYS(0); // lock already used
+	__NK_ASSERT_ALWAYS(!iLockedPageCount); // lock already used
 
 	// calculate the number of pages that need to be locked...
 	TUint mask=KPageMask;
 	TUint offset=aStart&mask;
 	TInt numPages = (aSize+offset+mask)>>KPageShift;
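+	// (e.g. with 4K pages, locking 100 bytes that start 10 bytes before a page
+	// boundary covers two pages)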
-	if(numPages>iMaxPageCount)
-		__NK_ASSERT_ALWAYS(0);
+
+	// Should never be asked to lock more pages than are allocated to this object.
+	__NK_ASSERT_ALWAYS(numPages <= iMaxPageCount);
 
 	NKern::ThreadEnterCS();