kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp
changeset 201 43365a9b78a3
parent 152 657f875b013e
--- 200:73ea206103e6
+++ 201:43365a9b78a3
@@ -857,12 +857,18 @@
 		__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns simulated OOM %d",KErrNoMemory));
 		return KErrNoMemory;
 		}
 #endif
 	TInt missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest);
-	if(missing && !(aFlags&EAllocNoPagerReclaim) && ThePager.GetFreePages(missing))
-		missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest);
+	if(missing && !(aFlags&EAllocNoPagerReclaim))
+		{
+		// taking the page cleaning lock here prevents the pager releasing the ram alloc lock
+		PageCleaningLock::Lock();
+		if (ThePager.GetFreePages(missing))
+			missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest);
+		PageCleaningLock::Unlock();
+		}
 	TInt r = missing ? KErrNoMemory : KErrNone;
 	if(r!=KErrNone)
		iRamAllocFailed = ETrue;
 	else
 		PagesAllocated(aPages,aCount,aFlags);
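The hunk above changes the reclaim path so that PageCleaningLock is held across the call into the pager and the retry, instead of letting GetFreePages run as an unlocked side effect of the if condition. Below is a minimal standalone sketch of that retry-under-lock pattern; PagePool, Pager, cleaningLock and AllocPages are hypothetical stand-ins, not the kernel's types.

// A minimal sketch (hypothetical names throughout) of the pattern in the
// hunk above: try to allocate, and only on a shortfall take the cleaning
// lock, ask the pager to reclaim, then retry the allocation once.
#include <algorithm>
#include <cstddef>
#include <mutex>
#include <vector>

struct PagePool {
    std::size_t freePages = 0;
    // Returns how many pages are still missing after this attempt.
    std::size_t Alloc(std::vector<int>& pages, std::size_t count) {
        std::size_t got = std::min(count, freePages);
        freePages -= got;
        pages.insert(pages.end(), got, 1);
        return count - got;
    }
};

struct Pager {
    // Frees up to `missing` pages back to the pool; true if any were freed.
    bool GetFreePages(PagePool& pool, std::size_t missing) {
        pool.freePages += missing;  // pretend reclaim always succeeds
        return missing != 0;
    }
};

std::mutex cleaningLock;  // stands in for PageCleaningLock

bool AllocPages(PagePool& pool, Pager& pager, std::vector<int>& pages, std::size_t count) {
    std::size_t missing = pool.Alloc(pages, count);
    if (missing) {
        // Hold the cleaning lock across reclaim + retry, as the new code
        // does, so page cleaning cannot interleave with the second attempt.
        std::lock_guard<std::mutex> guard(cleaningLock);
        if (pager.GetFreePages(pool, missing))
            missing = pool.Alloc(pages, count);
    }
    return missing == 0;
}

int main() {
    PagePool pool{4};
    Pager pager;
    std::vector<int> pages;
    return AllocPages(pool, pager, pages, 8) ? 0 : 1;
}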
@@ -1642,52 +1648,69 @@
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 	__NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
 
 	// get page to remap...
 	TPhysAddr pagePhys = aPage;
 
 	// Only remap the page if it is committed or it is being moved and
 	// no other operation has been performed on the page.
 	if(!RPageArray::TargetStateIsCommitted(pagePhys))
 		return; // page no longer needs mapping
 
 	// Only remap the page if it is currently mapped, i.e. doesn't have an unallocated pte.
 	// This will only be true if a new mapping is being added but it hasn't yet updated 
 	// all the ptes for the pages that it maps.
 	TPte pte = *aPtePtr;
 	if (pte == KPteUnallocatedEntry)
 		return;
 
 	// clear type flags...
 	pagePhys &= ~KPageMask;
 
+	// Get the SPageInfo of the page to map.  Allow pages without SPageInfos to
+	// be mapped as when freeing a shadow page may need to remap an unpaged ROM 
+	// page which won't have an SPageInfo.
 	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
 	if (pi)
 		{
 		SPageInfo::TPagedState pagedState = pi->PagedState();
 		if (pagedState != SPageInfo::EUnpaged)
 			{
-			// The page is demand paged.  Only remap the page if it is pinned or is currently
-			// accessible but to the old physical page.
-			if (pagedState != SPageInfo::EPagedPinned &&
-				 (Mmu::IsPteInaccessible(pte) || (pte^pagePhys) < TPte(KPageSize)))
-				return;
-			if (!pi->IsDirty())
-				{
-				// Ensure that the page is mapped as read only to prevent pages being marked dirty
-				// by page moving despite not having been written to
-				Mmu::MakePteInaccessible(aBlankPte, EFalse);
-				}
-			}
-		}
+			// For paged pages only update the pte if the pte points to the wrong physical
+			// address or the page is pinned.
+			if (pagedState != SPageInfo::EPagedPinned)
+				{
+				if ((pte^pagePhys) < TPte(KPageSize))
+					return;
+				if (Mmu::IsPteInaccessible(pte))
+					{
+					// Updating this pte shouldn't be necessary but it stops random data 
+					// corruption in stressed cases???
+					Mmu::MakePteInaccessible(aBlankPte, EFalse);
+					}
+				else if (!pi->IsDirty())
+					{
+					// Ensure that the page is mapped as read only to prevent pages being writable 
+					// without having been marked dirty.
+					Mmu::MakePteInaccessible(aBlankPte, ETrue);
+					}
+				}
+			else if (!pi->IsDirty())
+				{
+				// Ensure that the page is mapped as read only to prevent pages being writable 
+				// without having been marked dirty.
+				Mmu::MakePteInaccessible(aBlankPte, ETrue);
+				}
+			}
+		}
 
 	// Map the page in the page array entry as this is always the physical
 	// page that the memory object's page should be mapped to.
 	pte = pagePhys|aBlankPte;
 	TRACE2(("!PTE %x=%x",aPtePtr,pte));
 	*aPtePtr = pte;
 
 	// clean cache...
 	CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
 	}
 
 
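The new paged-page branch above decides whether the pte already maps the right frame with the expression (pte^pagePhys) < TPte(KPageSize): a pte carries a page-aligned physical address in its high bits and flag bits below the page size, so the XOR cancels the address bits exactly when the frames match. The same check appears in the assertions added further down. A standalone illustration with made-up values:

// Minimal sketch of the XOR trick used above: a PTE holds a page-aligned
// physical address in its high bits plus permission flags in the low bits,
// so pte ^ pagePhys leaves only flag bits when the frame numbers match.
// The addresses, flag bits and 4KB page size here are illustrative.
#include <cassert>
#include <cstdint>

int main() {
    const std::uint32_t KPageSize = 0x1000;      // 4KB pages
    const std::uint32_t pagePhys  = 0x80074000;  // page-aligned physical address
    const std::uint32_t flags     = 0x02E;       // hypothetical permission bits
    std::uint32_t pte = pagePhys | flags;

    // Same frame: the XOR cancels the address bits, leaving only the flags.
    assert((pte ^ pagePhys) < KPageSize);

    // Different frame: at least one address bit survives the XOR.
    assert((pte ^ (pagePhys + KPageSize)) >= KPageSize);
    return 0;
}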
@@ -1962,11 +1985,14 @@
 				__NK_ASSERT_DEBUG(0);
 				}
 			}
 #endif
 		if(!Mmu::IsPteMoreAccessible(aBlankPte,pte))
+			{
+			__NK_ASSERT_DEBUG((pte^page) < (TUint)KPageSize); // Must be the same physical addr.
 			return true; // return true to keep page table (it already had at least page mapped)
+			}
 
 		// remap page with new increased permissions...
 		if(pte==KPteUnallocatedEntry)
 			count = 1; // we'll be adding a new pte entry, count it
 		if(!Mmu::IsPteReadOnly(aBlankPte))
@@ -2015,10 +2041,12 @@
 						ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
 					pte = (page&~KPageMask)|aBlankPte;
 					TRACE2(("!PTE %x=%x",pPte-1,pte));
 					pPte[-1] = pte;
 					}
+				else
+					__NK_ASSERT_DEBUG((pte^page) < (TUint)KPageSize); // Must be the same physical addr.	
 				}
 			}
 		while(pPte!=pPteEnd);
 
 		// clean cache...
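In the second hunk the page is flagged via ThePager.SetWritable before the writable pte is published, matching the comments in the earlier hunk about never leaving a page writable without it having been marked dirty. A standalone sketch of that ordering and the "writable implies dirty" invariant; PageInfo, MakeWritable and CheckInvariant are hypothetical names:

// A minimal sketch (hypothetical names) of the invariant the hunks above
// maintain: a page may only be mapped writable after it has been marked
// dirty, so "writable implies dirty" can be asserted at any point.
#include <cassert>

struct PageInfo {
    bool dirty = false;
    bool writable = false;
};

void MakeWritable(PageInfo& page) {
    page.dirty = true;     // order matters: record the dirty state first...
    page.writable = true;  // ...then publish the writable mapping
}

void CheckInvariant(const PageInfo& page) {
    // Mirrors the intent of the read-only remapping in the earlier hunk:
    // a clean page must never be writable.
    assert(!page.writable || page.dirty);
}

int main() {
    PageInfo page;
    CheckInvariant(page);   // clean and read-only: holds
    MakeWritable(page);
    CheckInvariant(page);   // dirty and writable: holds
    return 0;
}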