kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp
branch RCL_3
changeset 26 c734af59ce98
parent 22 2f92ad2dc5db
child 28 5b5d147c7838
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp	Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp	Tue May 11 17:28:22 2010 +0300
@@ -108,12 +108,10 @@
 	}
 
 
-TInt DMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& /*aPageArrayEntry*/)
+void DMemoryManager::CleanPages(TUint aPageCount, SPageInfo** aPageInfos, TBool /*aBackground*/)
 	{
-	if(aPageInfo->IsDirty()==false)
-		return KErrNone;
-	__NK_ASSERT_DEBUG(0);
-	return KErrNotSupported;
+	for (TUint i = 0 ; i < aPageCount ; ++i)
+		__NK_ASSERT_DEBUG(!aPageInfos[i]->IsDirty());
 	}
 
 
@@ -136,6 +134,13 @@
 	return KErrNotSupported;
 	}
 
+
+TInt DMemoryManager::MoveAndAllocPage(DMemoryObject*, SPageInfo*, TZonePageType)
+	{
+	return KErrNotSupported;
+	}
+
+
 TZonePageType DMemoryManager::PageType()
 	{// This should not be invoked on memory managers that do not use the methods
 	// AllocPages() and FreePages().
@@ -723,6 +728,7 @@
 public:
 	// from DMemoryManager...
 	virtual TInt MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo, TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest);
+	virtual TInt MoveAndAllocPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TZonePageType aPageType);
 	virtual TInt HandleFault(	DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping, 
 								TUint aMapInstanceCount, TUint aAccessPermissions);
 	virtual TZonePageType PageType();
@@ -889,6 +895,18 @@
 	}
 
 
+TInt DMovableMemoryManager::MoveAndAllocPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TZonePageType aPageType)
+	{
+	TPhysAddr newPage;
+	TInt r = MovePage(aMemory, aPageInfo, newPage, KRamZoneInvalidId, EFalse);
+	if (r == KErrNone)
+		{
+		TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType);
+		}
+	return r;
+	}
+
+
 TInt DMovableMemoryManager::HandleFault(DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping, 
 										TUint aMapInstanceCount, TUint aAccessPermissions)
 	{
@@ -1039,6 +1057,9 @@
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 	__UNLOCK_GUARD_START(MmuLock);
 
+	// must always hold the PageCleaningLock if the page needs to be cleaned
+	__NK_ASSERT_DEBUG(!aPageInfo->IsDirty() || PageCleaningLock::IsHeld());
+
 	TUint index = aPageInfo->Index();
 	TInt r;
 
@@ -1096,10 +1117,11 @@
 			// page successfully unmapped...
 			aPageInfo->SetReadOnly(); // page not mapped, so must be read-only
 
-			// if the page can be made clean...
-			r = aMemory->iManager->CleanPage(aMemory,aPageInfo,p);
+			// attempt to clean the page if it is dirty...
+			if (aPageInfo->IsDirty())
+				aMemory->iManager->CleanPages(1, &aPageInfo, EFalse);
 
-			if(r==KErrNone)
+			if(aPageInfo)
 				{
 				// page successfully stolen...
 				__NK_ASSERT_DEBUG((*p^page)<(TUint)KPageSize); // sanity check, page should still be allocated to us
@@ -1111,13 +1133,10 @@
 				__NK_ASSERT_ALWAYS((pagerInfo&(RPageArray::EFlagsMask|RPageArray::EStateMask)) == RPageArray::ENotPresent);
 
 				TheMmu.PageFreed(aPageInfo);
+				r = KErrNone;
 				}
 			else
-				{
-				// only legitimate reason for failing the clean is if the page state was changed
-				// by a page fault or by pinning, this should return KErrInUse...
-				__NK_ASSERT_DEBUG(r==KErrInUse);
-				}
+				r = KErrInUse;
 			}
 		}
 
@@ -2030,14 +2049,6 @@
 	__NK_ASSERT_ALWAYS(0);
 	return KErrNotSupported;
 	}
-
-
-TInt DPagedMemoryManager::WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest)
-	{
-	__NK_ASSERT_ALWAYS(0);
-	return KErrNotSupported;
-	}
-
 TZonePageType DPagedMemoryManager::PageType()
 	{// Paged manager's pages should be discardable and will actually be freed by 
 	// the pager so this value won't be used.
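

The new DMemoryManager::MoveAndAllocPage() default simply returns KErrNotSupported; DMovableMemoryManager overrides it to move the page contents to a fresh frame and then mark the vacated frame as allocated via TheMmu.MarkPageAllocated(). A minimal caller-side sketch of that contract follows, assuming only the kernel-internal types already used in this file (DMemoryObject, SPageInfo, TZonePageType); the helper name ClaimManagedPage is hypothetical and the locking that real callers hold (e.g. the RAM allocator lock) is omitted.

// Hypothetical caller sketch, not part of this changeset: shows how an
// allocation path could use MoveAndAllocPage() to claim a physical frame
// currently owned by a movable memory object.
TInt ClaimManagedPage(SPageInfo* aPageInfo, TZonePageType aPageType)
	{
	// Valid only for a frame owned by a managed memory object;
	// SPageInfo::Owner() is only meaningful for such pages.
	DMemoryObject* memory = aPageInfo->Owner();

	// Move the page's contents elsewhere and, on success, mark the vacated
	// frame as allocated with the requested page type; managers that cannot
	// move pages return KErrNotSupported from the default implementation.
	return memory->iManager->MoveAndAllocPage(memory, aPageInfo, aPageType);
	}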
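
The assertion added to StealPage() documents a locking contract rather than new behaviour: a caller that may hand the manager a dirty page must already hold the PageCleaningLock, in addition to the MmuLock that StealPage() has always required on entry. A caller-side sketch of that ordering, again with a hypothetical helper name (TryStealOnePage) and with the further locks held by the real pager (e.g. the RAM allocator lock) left out:

// Hypothetical caller sketch, not part of this changeset: illustrates the
// lock ordering implied by the PageCleaningLock assertion added above.
TInt TryStealOnePage(DMemoryObject* aMemory, SPageInfo* aPageInfo)
	{
	// Taken first because the page may be dirty; StealPage() now asserts
	// that dirty pages are only stolen with the PageCleaningLock held.
	PageCleaningLock::Lock();

	// StealPage() requires the MmuLock to be held across the call.
	MmuLock::Lock();
	TInt r = aMemory->iManager->StealPage(aMemory, aPageInfo);
	MmuLock::Unlock();

	PageCleaningLock::Unlock();
	return r;
	}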