kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp
branch RCL_3
changeset 41 0ffb4e86fcc9
parent 28 5b5d147c7838
child 43 c1f20ce4abcf
comparison 39:2bb754abd467 vs 41:0ffb4e86fcc9
@@ -1636,24 +1636,27 @@
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 	__NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
 
 	// get page to remap...
 	TPhysAddr pagePhys = aPage;
 
 	// Only remap the page if it is committed or it is being moved and
 	// no other operation has been performed on the page.
 	if(!RPageArray::TargetStateIsCommitted(pagePhys))
 		return; // page no longer needs mapping
 
 	// Only remap the page if it is currently mapped, i.e. doesn't have an unallocated pte.
 	// This will only be true if a new mapping is being added but it hasn't yet updated 
 	// all the ptes for the pages that it maps.
 	TPte pte = *aPtePtr;
 	if (pte == KPteUnallocatedEntry)
 		return;
 
 	// clear type flags...
 	pagePhys &= ~KPageMask;
 
+	// Get the SPageInfo of the page to map.  Allow pages without SPageInfos to
+	// be mapped as when freeing a shadow page may need to remap an unpaged ROM 
+	// page which won't have an SPageInfo.
 	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
 	if (pi)
 		{
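The "clear type flags" mask above works because physical pages are 4K aligned: the low 12 bits of a page-array entry are spare, so they carry the entry's type/state flags, which is also what `RPageArray::TargetStateIsCommitted` inspects. A standalone sketch of that packing; the flag value is hypothetical, not taken from mmu.cpp. The diff continues below.

// Standalone sketch, not kernel code: state flags packed into the low bits
// of a 4K-aligned physical address, as the "clear type flags" mask implies.
#include <cstdint>
#include <cassert>

const uint32_t KPageSize = 0x1000;        // 4K pages
const uint32_t KPageMask = KPageSize - 1; // low 12 bits are spare in an aligned address
const uint32_t KStateCommitted = 0x1;     // hypothetical state flag

int main()
	{
	uint32_t entry = 0x8004A000 | KStateCommitted;  // address and state share one word
	assert((entry & KPageMask) == KStateCommitted); // state recovered from the low bits
	assert((entry & ~KPageMask) == 0x8004A000);     // address recovered by masking, as above
	return 0;
	}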
@@ -1660,28 +1663,42 @@
 		SPageInfo::TPagedState pagedState = pi->PagedState();
 		if (pagedState != SPageInfo::EUnpaged)
 			{
-			// The page is demand paged.  Only remap the page if it is pinned or is currently
-			// accessible but to the old physical page.
-			if (pagedState != SPageInfo::EPagedPinned &&
-				 (Mmu::IsPteInaccessible(pte) || (pte^pagePhys) < TPte(KPageSize)))
-				return;
-			if (!pi->IsDirty())
-				{
-				// Ensure that the page is mapped as read only to prevent pages being marked dirty
-				// by page moving despite not having been written to
-				Mmu::MakePteInaccessible(aBlankPte, EFalse);
-				}
-			}
-		}
+			// For paged pages only update the pte if the pte points to the wrong physical
+			// address or the page is pinned.
+			if (pagedState != SPageInfo::EPagedPinned)
+				{
+				if ((pte^pagePhys) < TPte(KPageSize))
+					return;
+				if (Mmu::IsPteInaccessible(pte))
+					{
+					// Updating this pte shouldn't be necessary but it stops random data 
+					// corruption in stressed cases???
+					Mmu::MakePteInaccessible(aBlankPte, EFalse);
+					}
+				else if (!pi->IsDirty())
+					{
+					// Ensure that the page is mapped as read only to prevent pages being writable 
+					// without having been marked dirty.
+					Mmu::MakePteInaccessible(aBlankPte, ETrue);
+					}
+				}
+			else if (!pi->IsDirty())
+				{
+				// Ensure that the page is mapped as read only to prevent pages being writable 
+				// without having been marked dirty.
+				Mmu::MakePteInaccessible(aBlankPte, ETrue);
+				}
+			}
+		}
 
 	// Map the page in the page array entry as this is always the physical
 	// page that the memory object's page should be mapped to.
 	pte = pagePhys|aBlankPte;
 	TRACE2(("!PTE %x=%x",aPtePtr,pte));
 	*aPtePtr = pte;
 
 	// clean cache...
 	CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
 	}
 
 
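One detail both versions rely on: `(pte^pagePhys) < TPte(KPageSize)` is a branch-free same-page test. XOR clears every bit where the two words agree, so the result stays below the page size exactly when the words agree in all bits above the page offset, i.e. the pte already points into the same 4K physical page regardless of the permission bits in its low 12 bits. A standalone sketch with illustrative values:

// Standalone sketch, not kernel code: the XOR same-page test used above.
#include <cstdint>
#include <cassert>

const uint32_t KPageSize = 0x1000;

bool SamePage(uint32_t aPte, uint32_t aPhys)
	{
	// Bits 12 and up hold the page frame; bits 0-11 hold offset/permission bits.
	return (aPte ^ aPhys) < KPageSize;
	}

int main()
	{
	assert(SamePage(0x8004A05F, 0x8004A000));  // same frame, different low bits
	assert(!SamePage(0x8004B05F, 0x8004A000)); // next frame: XOR sets bit 12
	return 0;
	}

The new code pulls this test out of the old compound condition so that the wrong-address, inaccessible, and clean cases can each be handled on their own branch.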
@@ -1956,11 +1973,14 @@
 				__NK_ASSERT_DEBUG(0);
 				}
 			}
 #endif
 		if(!Mmu::IsPteMoreAccessible(aBlankPte,pte))
+			{
+			__NK_ASSERT_DEBUG((pte^page) < (TUint)KPageSize); // Must be the same physical addr.
 			return true; // return true to keep page table (it already had at least page mapped)
+			}
 
 		// remap page with new increased permissions...
 		if(pte==KPteUnallocatedEntry)
 			count = 1; // we'll be adding a new pte entry, count it
 		if(!Mmu::IsPteReadOnly(aBlankPte))
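The added braces exist so the early return can carry a debug assertion that the existing pte already maps the same physical page; only the permissions may differ. The remap below then only proceeds when the blank pte would grant strictly more access. A sketch of the ordering this implies; the real `Mmu::IsPteMoreAccessible` is architecture-specific and the enum here is invented:

// Standalone sketch, not kernel code: permissions as a strict ordering,
// inaccessible < read-only < writable (assumed semantics).
#include <cassert>

enum TPteAccess { EPteInaccessible, EPteReadOnly, EPteWritable };

bool IsPteMoreAccessible(TPteAccess aBlank, TPteAccess aCurrent)
	{
	return aBlank > aCurrent; // remap only if access strictly increases
	}

int main()
	{
	assert(IsPteMoreAccessible(EPteWritable, EPteReadOnly));
	assert(!IsPteMoreAccessible(EPteReadOnly, EPteReadOnly)); // equal: keep the page table as-is
	return 0;
	}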
@@ -2009,10 +2029,12 @@
 						ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
 					pte = (page&~KPageMask)|aBlankPte;
 					TRACE2(("!PTE %x=%x",pPte-1,pte));
 					pPte[-1] = pte;
 					}
+				else
+					__NK_ASSERT_DEBUG((pte^page) < (TUint)KPageSize); // Must be the same physical addr.
 				}
 			}
 		while(pPte!=pPteEnd);
 
 		// clean cache...
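Here `ThePager.SetWritable` runs just before the pte is rewritten with the writable blank attributes, which pairs with the first hunk's rule that a clean paged page must stay read-only: the pager's bookkeeping is updated before any mapping can let a write land. A standalone sketch of that order of operations; the types and flag value are invented:

// Standalone sketch, not kernel code: mark the page writable first,
// then publish a pte that actually permits writes.
#include <cstdint>
#include <cassert>

const uint32_t KPageMask = 0xFFF;
const uint32_t KPteWritable = 0x2; // hypothetical permission bit

struct SPage { bool iWritable = false; };

uint32_t MakeWritable(SPage& aPage, uint32_t aPhys, uint32_t aBlankPte)
	{
	aPage.iWritable = true;                  // bookkeeping first (cf. ThePager.SetWritable)
	return (aPhys & ~KPageMask) | aBlankPte; // then the pte, as in pPte[-1] = pte above
	}

int main()
	{
	SPage page;
	uint32_t pte = MakeWritable(page, 0x8004A123, KPteWritable);
	assert(page.iWritable && pte == (0x8004A000 | KPteWritable));
	return 0;
	}

The closing `// clean cache...` line leads into the same `CacheMaintenance::SinglePteUpdated` pattern seen in the first hunk, making the rewritten entries visible to the MMU's table walk.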