kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp
branch RCL_3
changeset 28 5b5d147c7838
parent 26 c734af59ce98
child 41 0ffb4e86fcc9
26:c734af59ce98 28:5b5d147c7838
@@ -135,10 +135,11 @@
 	}
 
 
 TInt DMemoryManager::MoveAndAllocPage(DMemoryObject*, SPageInfo*, TZonePageType)
 	{
+	__NK_ASSERT_DEBUG(0);	// This should only be invoked on managers that can move or discard pages.
 	return KErrNotSupported;
 	}
 
 
 TZonePageType DMemoryManager::PageType()
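Note: the added line 140 turns the base-class stub into a debug trap: in debug builds a manager that cannot move or discard pages now hits __NK_ASSERT_DEBUG(0) immediately, while release builds still fail gracefully with KErrNotSupported. A minimal standalone sketch of this "assert in debug, fail benignly in release" pattern follows; MY_ASSERT_DEBUG, TErr and MemoryManagerBase are simplified stand-ins, not the kernel's own macros or classes.

// Sketch only: hypothetical stand-ins for __NK_ASSERT_DEBUG, TInt and DMemoryManager.
#include <cassert>

#ifdef NDEBUG
#define MY_ASSERT_DEBUG(c) ((void)0)   // compiled out in release builds
#else
#define MY_ASSERT_DEBUG(c) assert(c)   // traps immediately in debug builds
#endif

enum TErr { KErrNone = 0, KErrNotSupported = -5 };

struct MemoryManagerBase
	{
	// Default implementation: managers that can move or discard pages
	// override this; anything else reaching it is a programming error.
	virtual TErr MoveAndAllocPage()
		{
		MY_ASSERT_DEBUG(0);          // catch misuse early in debug builds
		return KErrNotSupported;     // benign failure in release builds
		}
	virtual ~MemoryManagerBase() {}
	};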
@@ -814,11 +815,12 @@
 	// Verify that page restricting wasn't interrupted, if it was then the page
 	// can't be moved so remap it.
 	// If the page array entry (*movingPageArrayPtr) has been modified then a pinning
 	// veto'd the preparation.
 	MmuLock::Lock();
-	if (aOldPageInfo->CheckModified(&pageIter) || oldPageEntry != *movingPageArrayPtr)
+	if (aOldPageInfo->CheckModified(&pageIter) ||
+		oldPageEntry != *movingPageArrayPtr)
 		{// Page is pinned or has been modified by another operation.
 		MmuLock::Unlock();
 		TheMmu.FreeRam(&newPage, 1, aMemory->iManager->PageType());
 		goto remap;
 		}
@@ -839,11 +841,12 @@
 #ifndef _DEBUG
 	TheMmu.UnmapTemp(KOldMappingSlot);
 #endif
 	
 	MmuLock::Lock();
-	if (!aOldPageInfo->CheckModified(&pageIter) && oldPageEntry == *movingPageArrayPtr &&
+	if (!aOldPageInfo->CheckModified(&pageIter) &&
+		oldPageEntry == *movingPageArrayPtr &&
 		!aMemory->MappingAddedFlag())
 		{
 		// The page has been copied without anyone modifying it so set the page
 		// array entry to new physical address and map the page.
 		RPageArray::PageMoveNewAddr(*movingPageArrayPtr, newPage);
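Note: both reformatted conditions above express the same optimistic page-move pattern: copy the page without holding MmuLock, then retake the lock and only commit the new physical address if nothing changed in the meantime (CheckModified still clear, the page array entry still matching the earlier snapshot, no new mapping added); otherwise free the new page and remap the old one. A rough standalone sketch of that "copy, then revalidate under the lock" idea follows; the mutex, PageSlot and TryMovePage names are illustrative, not the kernel's MmuLock/RPageArray API.

// Sketch only: simplified stand-ins for MmuLock, the page array entry and
// the page-info modification flag used by the real page-move code.
#include <cstddef>
#include <cstring>
#include <mutex>

std::mutex MmuLockSketch;                 // plays the role of MmuLock

struct PageSlot
	{
	unsigned long iArrayEntry;            // snapshot-comparable page array entry
	bool iModified;                       // set by pinning/other concurrent operations
	};

// Returns true if the copy could be committed, false if the caller must
// free the new page and remap the old one (the "goto remap" path above).
bool TryMovePage(PageSlot& aSlot, void* aOld, void* aNew,
                 std::size_t aPageSize, unsigned long aNewEntry)
	{
	unsigned long snapshot;
		{
		std::lock_guard<std::mutex> lock(MmuLockSketch);
		snapshot = aSlot.iArrayEntry;     // remember the entry before copying
		aSlot.iModified = false;
		}

	std::memcpy(aNew, aOld, aPageSize);   // copy without holding the lock

	std::lock_guard<std::mutex> lock(MmuLockSketch);
	if (aSlot.iModified || aSlot.iArrayEntry != snapshot)
		return false;                     // someone pinned or changed the page: abort
	aSlot.iArrayEntry = aNewEntry;        // commit: point the array at the new page
	return true;
	}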
@@ -1117,11 +1120,14 @@
 			// page successfully unmapped...
 			aPageInfo->SetReadOnly(); // page not mapped, so must be read-only
 
 			// attempt to clean the page if it is dirty...
 			if (aPageInfo->IsDirty())
+				{
+				//Kern::Printf("WDP: Cleaning single page in StealPage");
 				aMemory->iManager->CleanPages(1, &aPageInfo, EFalse);
+				}
 
 			if(aPageInfo)
 				{
 				// page successfully stolen...
 				__NK_ASSERT_DEBUG((*p^page)<(TUint)KPageSize); // sanity check, page should still be allocated to us
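Note: the new braces (plus a commented-out trace) keep CleanPages as the only statement guarded by IsDirty(): a page with unwritten modifications is cleaned, i.e. written back, before its frame can be stolen for reuse. A small hedged sketch of that "write back if dirty, then reuse" step follows; DirtyPage, WriteBack and StealFrame are invented helpers, not the kernel's CleanPages interface.

// Sketch only: illustrates cleaning a dirty page before stealing its frame.
#include <vector>

struct DirtyPage
	{
	std::vector<unsigned char> iData;
	bool iDirty;
	};

void WriteBack(DirtyPage& aPage)
	{
	// In the real manager this is CleanPages(), which writes the page out
	// to the backing store; here we only clear the flag.
	aPage.iDirty = false;
	}

// A frame may only be stolen once pending modifications are safe on the
// backing store, otherwise the data would be lost.
void StealFrame(DirtyPage& aPage)
	{
	if (aPage.iDirty)
		WriteBack(aPage);     // clean first, mirroring the IsDirty() guard above
	aPage.iData.clear();      // frame is now free for reuse
	}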
@@ -1533,14 +1539,11 @@
 
 	MmuLock::Lock();
 
 	page = *p;
 	if(aPageInfo->CheckModified(&pageList) || page!=originalPage/*page state changed*/)
-		{
-		// page state was changed by someone else...
 		r = KErrInUse;
-		}
 	else
 		{
 		// nobody else has modified page state, so restrictions successfully applied...
 		*p = (page&~RPageArray::EStateMask)|RPageArray::ECommitted; // restore state
 		aPageInfo->SetReadOnly();
@@ -1675,10 +1678,12 @@
 TInt DPagedMemoryManager::DoPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
 	{
 	TRACE(("DPagedMemoryManager::DoPin(0x%08x,0x%08x,0x%08x,0x%08x)",aMemory, aIndex, aCount, aMapping));
 	__ASSERT_CRITICAL;
 	__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(aCount));
+	__NK_ASSERT_DEBUG(aMapping->IsPinned());
+	__NK_ASSERT_DEBUG(!aMapping->PagesPinned());
 
 	// check and allocate page array entries...
 	RPageArray::TIter pageList;
 	TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageList,true);
 	if(r!=KErrNone)