kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp
changeset 121 661475905584
parent 90 947f0dc9f7a8
child 132 e4a7b1cbe40c
equal deleted inserted replaced
120:b42b9ce90ea9 121:661475905584
    90 					+(4+KPtClusterSize-1)/KPtClusterSize		// page table pages
    90 					+(4+KPtClusterSize-1)/KPtClusterSize		// page table pages
    91 					+(4+KPageTableInfosPerPage-1)/KPageTableInfosPerPage;	// page table info pages
    91 					+(4+KPageTableInfosPerPage-1)/KPageTableInfosPerPage;	// page table info pages
    92 
    92 
    93 #elif defined(__CPU_X86)
    93 #elif defined(__CPU_X86)
    94 
    94 
    95 /*	Need at least 6 mapped pages to guarantee to be able to execute all ARM instructions,
    95 /*	Need at least 6 mapped pages to guarantee to be able to execute all X86 instructions,
    96 	plus enough pages for 6 page tables to map those pages, plus enough pages for the
    96 	plus enough pages for 6 page tables to map those pages, plus enough pages for the
    97 	page table info structures of those page tables.
    97 	page table info structures of those page tables.
    98 	(Worst case is (?) a MOV [X],[Y] instruction where the instruction itself, 'X' and
    98 	(Worst case is (?) a MOV [X],[Y] instruction where the instruction itself, 'X' and
    99 	'Y' all straddle chunk boundaries.)
    99 	'Y' all straddle chunk boundaries.)
   100 */
   100 */
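
For reference, the X86 minimum works out with the same ceiling-division idiom the ARM branch uses above. A minimal sketch (the constant names here are illustrative, not from the source):

	// Assuming the 6 mapped pages stated in the comment; each division
	// rounds up using the (n + d - 1) / d idiom seen above.
	const TInt KWorstCaseMappedPages = 6;
	const TInt KWorstCasePtPages =
		(KWorstCaseMappedPages + KPtClusterSize - 1) / KPtClusterSize;
	const TInt KWorstCasePtInfoPages =
		(KWorstCaseMappedPages + KPageTableInfosPerPage - 1) / KPageTableInfosPerPage;
	// minimum live-list size = KWorstCaseMappedPages + KWorstCasePtPages + KWorstCasePtInfoPages
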
   198 		// Allocate a single page
   198 		// Allocate a single page
   199 		TPhysAddr pagePhys;
   199 		TPhysAddr pagePhys;
   200 		TInt r = m.AllocRam(&pagePhys, 1, 
   200 		TInt r = m.AllocRam(&pagePhys, 1, 
   201 							(Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe|Mmu::EAllocNoPagerReclaim), 
   201 							(Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe|Mmu::EAllocNoPagerReclaim), 
   202 							EPageDiscard);
   202 							EPageDiscard);
   203 		if(r!=KErrNone)
   203 		__NK_ASSERT_ALWAYS(r == KErrNone);
   204 			__NK_ASSERT_ALWAYS(0);
       
   205 		MmuLock::Lock();
   204 		MmuLock::Lock();
   206 		AddAsFreePage(SPageInfo::FromPhysAddr(pagePhys));
   205 		AddAsFreePage(SPageInfo::FromPhysAddr(pagePhys));
   207 		MmuLock::Unlock();
   206 		MmuLock::Unlock();
   208 		}
   207 		}
   209 	RamAllocLock::Unlock();
   208 	RamAllocLock::Unlock();
   212 	TRACEB(("DPager::InitCache() end with young=%d old=%d oldClean=%d oldDirty=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
   211 	TRACEB(("DPager::InitCache() end with young=%d old=%d oldClean=%d oldDirty=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
   213 	}
   212 	}
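
Note the lock nesting InitCache() relies on: RamAllocLock is held across the whole allocation loop, while MmuLock is taken only around each live-list insertion. A minimal sketch of the pattern (assuming this ordering is the required one, as the code above suggests):

	RamAllocLock::Lock();          // outer: guards physical RAM allocation
	// ... m.AllocRam(...) ...
	MmuLock::Lock();               // inner: guards live-list / SPageInfo state
	// ... AddAsFreePage(...) ...
	MmuLock::Unlock();
	RamAllocLock::Unlock();        // released in reverse order
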
   214 
   213 
   215 
   214 
   216 #ifdef _DEBUG
   215 #ifdef _DEBUG
       
   216 #ifdef FMM_PAGER_CHECK_LISTS
       
   217 TBool CheckList(SDblQueLink* aHead, TUint aCount)
       
   218 	{
       
   219 	SDblQueLink* link = aHead;
       
   220 	while(aCount--)
       
   221 		{
       
   222 		link = link->iNext;
       
   223 		if(link == aHead)
       
   224 			return EFalse;
       
   225 		}
       
   226 	link = link->iNext;
       
   227 	if(link != aHead)
       
   228 		return EFalse;
       
   229 	return ETrue;
       
   230 	}
       
   231 #endif // #ifdef FMM_PAGER_CHECK_LISTS
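
CheckList() returns ETrue only when the circular list headed at aHead contains exactly aCount links: the walk must not wrap back to the head early, and the step after the last counted link must land back on the head. A minimal usage sketch (illustrative only; SDblQue initialises its head link to point at itself):

	SDblQue q;
	__NK_ASSERT_DEBUG(CheckList(&q.iA, 0));  // empty queue: the head's iNext is the head itself
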
       
   232 
   217 TBool DPager::CheckLists()
   233 TBool DPager::CheckLists()
   218 	{
   234 	{
   219 #if 0
   235 #ifdef FMM_PAGER_CHECK_LISTS
   220 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   236 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   221 	SDblQueLink* head = &iOldList.iA;
   237 	if (!CheckList(&iOldList.iA, iOldCount))
   222 	TInt n = iOldCount;
   238 		return EFalse;
   223 	SDblQueLink* link = head;
   239 	if (!CheckList(&iYoungList.iA, iYoungCount))
   224 	while(n--)
   240 		return EFalse;
   225 		{
   241 	if (!CheckList(&iOldestCleanList.iA, iOldestCleanCount))
   226 		link = link->iNext;
   242 		return EFalse;
   227 		if(link==head)
   243 	if (!CheckList(&iOldestDirtyList.iA, iOldestDirtyCount))
   228 			return false;
   244 		return EFalse;
   229 		}
   245 	TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d", iYoungCount, iOldCount, 
   230 	link = link->iNext;
   246 			iOldestCleanCount, iOldestDirtyCount, iNumberOfFreePages));
   231 	if(link!=head)
   247 	TraceCounts();
   232 		return false;
   248 #endif // #ifdef FMM_PAGER_CHECK_LISTS
   233 
       
   234 	head = &iYoungList.iA;
       
   235 	n = iYoungCount;
       
   236 	link = head;
       
   237 	while(n--)
       
   238 		{
       
   239 		link = link->iNext;
       
   240 		if(link==head)
       
   241 			return false;
       
   242 		}
       
   243 	link = link->iNext;
       
   244 	if(link!=head)
       
   245 		return false;
       
   246 
       
   247 //	TRACEP(("DP: y=%d o=%d f=%d",iYoungCount,iOldCount,iNumberOfFreePages));
       
   248 #endif
       
   249 //	TraceCounts();
       
   250 	return true;
    249 	return ETrue;
   251 	}
   250 	}
   252 
   251 
   253 void DPager::TraceCounts()
   252 void DPager::TraceCounts()
   254 	{
   253 	{
   255 	TRACEP(("DP: y=%d o=%d f=%d min=%d max=%d ml=%d res=%d",
   254 	TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d min=%d max=%d ml=%d res=%d",
   256 		iYoungCount,iOldCount,iNumberOfFreePages,iMinimumPageCount,
   255 		iYoungCount, iOldCount, iOldestCleanCount, iOldestDirtyCount, 
   257 		iMaximumPageCount,iMinimumPageLimit,iReservePageCount));
   256 		iNumberOfFreePages, iMinimumPageCount, iMaximumPageCount,
   258 	}
   257 		iMinimumPageLimit, iReservePageCount));
   259 
   258 	}
   260 #endif
   259 #endif //#ifdef _DEBUG
   261 
   260 
   262 
   261 
   263 TBool DPager::HaveTooManyPages()
   262 TBool DPager::HaveTooManyPages()
   264 	{
   263 	{
   265 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   264 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
  1290 	TheDataPagedMemoryManager->Init3();
  1289 	TheDataPagedMemoryManager->Init3();
  1291 	TheCodePagedMemoryManager->Init3();
  1290 	TheCodePagedMemoryManager->Init3();
  1292 	TInt r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
  1291 	TInt r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
  1293 	__NK_ASSERT_ALWAYS(r==KErrNone);
  1292 	__NK_ASSERT_ALWAYS(r==KErrNone);
  1294 	PageCleaningLock::Init();
  1293 	PageCleaningLock::Init();
       
  1294 #ifdef __DEMAND_PAGING_BENCHMARKS__
       
  1295 	for (TInt i = 0 ; i < EMaxPagingBm ; ++i)
       
  1296 		ResetBenchmarkData((TPagingBenchmark)i);
       
  1297 #endif
  1295 	}
  1298 	}
  1296 
  1299 
  1297 
  1300 
  1298 void DPager::Fault(TFault aFault)
  1301 void DPager::Fault(TFault aFault)
  1299 	{
  1302 	{
  1982 	NKern::ThreadEnterCS();
  1985 	NKern::ThreadEnterCS();
  1983 	RamAllocLock::Lock();
  1986 	RamAllocLock::Lock();
  1984 
  1987 
  1985 	MmuLock::Lock();
  1988 	MmuLock::Lock();
  1986 
  1989 
  1987 	__NK_ASSERT_ALWAYS(iYoungOldRatio!=0);
  1990 	__NK_ASSERT_ALWAYS(iYoungOldRatio);
  1988 
  1991 
  1989 	// Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
  1992 	// Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
  1990 	iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
  1993 	iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
  1991 						+ DPageReadRequest::ReservedPagesRequired();
  1994 						+ DPageReadRequest::ReservedPagesRequired();
  1992 	if(iMinimumPageLimit<iAbsoluteMinPageCount)
  1995 	if(iMinimumPageLimit<iAbsoluteMinPageCount)
  1995 		aMinimumPageCount = iMinimumPageLimit+iReservePageCount;
  1998 		aMinimumPageCount = iMinimumPageLimit+iReservePageCount;
  1996 	if(aMaximumPageCount<aMinimumPageCount)
  1999 	if(aMaximumPageCount<aMinimumPageCount)
  1997 		aMaximumPageCount=aMinimumPageCount;
  2000 		aMaximumPageCount=aMinimumPageCount;
  1998 
  2001 
  1999 	// Increase iMaximumPageCount?
  2002 	// Increase iMaximumPageCount?
  2000 	TInt extra = aMaximumPageCount-iMaximumPageCount;
  2003 	if(aMaximumPageCount > iMaximumPageCount)
  2001 	if(extra>0)
  2004 		iMaximumPageCount = aMaximumPageCount;
  2002 		iMaximumPageCount += extra;
       
  2003 
  2005 
  2004 	// Reduce iMinimumPageCount?
  2006 	// Reduce iMinimumPageCount?
  2005 	TInt spare = iMinimumPageCount-aMinimumPageCount;
  2007 	TInt spare = iMinimumPageCount-aMinimumPageCount;
  2006 	if(spare>0)
  2008 	if(spare>0)
  2007 		{
  2009 		{
  2121 	TRACE(("DPager::FlushAll() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  2123 	TRACE(("DPager::FlushAll() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  2122 
  2124 
  2123 	PageCleaningLock::Unlock();
  2125 	PageCleaningLock::Unlock();
  2124 	RamAllocLock::Unlock();
  2126 	RamAllocLock::Unlock();
  2125 	NKern::ThreadLeaveCS();
  2127 	NKern::ThreadLeaveCS();
       
  2128 	}
       
  2129 
       
  2130 
       
  2131 TInt DPager::FlushRegion(DMemModelProcess* aProcess, TLinAddr aStartAddress, TUint aSize)
       
  2132 	{
       
  2133 	if (aSize == 0)
       
  2134 		return KErrNone;
       
  2135 
       
  2136 	// find mapping
       
  2137 	NKern::ThreadEnterCS();
       
  2138 	TUint offsetInMapping;
       
  2139 	TUint mapInstanceCount;
       
  2140 	DMemoryMapping* mapping = MM::FindMappingInProcess(aProcess, aStartAddress, aSize,
       
  2141 													   offsetInMapping, mapInstanceCount);
       
  2142 	if (!mapping)
       
  2143 		{
       
  2144 		NKern::ThreadLeaveCS();
       
  2145 		return KErrBadDescriptor;
       
  2146 		}
       
  2147 
       
  2148 	// check whether memory is demand paged
       
  2149 	MmuLock::Lock();
       
  2150 	DMemoryObject* memory = mapping->Memory();
       
  2151 	if(mapInstanceCount != mapping->MapInstanceCount() || memory == NULL || !memory->IsDemandPaged())
       
  2152 		{
       
  2153 		MmuLock::Unlock();
       
  2154 		mapping->Close();
       
  2155 		NKern::ThreadLeaveCS();
       
  2156 		return KErrNone;
       
  2157 		}
       
  2158 
       
  2159 	TRACE(("DPager::FlushRegion: %O %08x +%d", aProcess, aStartAddress, aSize));
       
  2160 	if (!K::Initialising)
       
  2161 		TRACE2(("  context %T %d", NCurrentThread(), NKern::CurrentContext()));
       
  2162 
       
  2163 	// why did we not get assertion failures before I added this?
       
  2164 	__NK_ASSERT_DEBUG(!Kern::CurrentThread().IsRealtime());
       
  2165 
       
  2166 	// acquire necessary locks
       
  2167 	MmuLock::Unlock();
       
  2168 	RamAllocLock::Lock();
       
  2169 	PageCleaningLock::Lock();
       
  2170 	MmuLock::Lock();
       
  2171 
       
  2172 	// find region in memory object
       
  2173 	TUint startPage = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
       
   2174 	TUint sizeInPages = ((aStartAddress & KPageMask) + aSize - 1) >> KPageShift;	// offset in pages of the region's last page, so endPage below is inclusive
       
  2175 	TUint endPage = startPage + sizeInPages;
       
  2176 	TRACE2(("DPager::FlushRegion: page range is %d to %d", startPage, endPage));
       
  2177 	
       
  2178 	// attempt to flush each page
       
  2179 	TUint index = startPage;
       
  2180 	while (mapping->MapInstanceCount() == mapInstanceCount &&
       
  2181 		   mapping->Memory() && index <= endPage)
       
  2182 		{
       
  2183 		TRACE2(("DPager::FlushRegion: flushing page %d", index));
       
  2184 		TPhysAddr physAddr = memory->iPages.PhysAddr(index);
       
  2185 		
       
  2186 		if (physAddr != KPhysAddrInvalid)
       
  2187 			{
       
  2188 			TRACE2(("DPager::FlushRegion: phys addr is %08x", physAddr));
       
  2189 			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr);
       
  2190 			if (pi)
       
  2191 				{
       
  2192 				__NK_ASSERT_DEBUG(pi->Type() == SPageInfo::EManaged);
       
  2193 				SPageInfo::TPagedState state = pi->PagedState();
       
  2194 				if (state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
       
  2195 					state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
       
  2196 					{
       
  2197 					TRACE2(("DPager::FlushRegion: attempt to steal page"));
       
  2198 					TInt r = StealPage(pi);
       
  2199 					if(r==KErrNone)
       
  2200 						{
       
  2201 						TRACE2(("DPager::FlushRegion: attempt to page out %08x", physAddr));
       
  2202 						AddAsFreePage(pi);
       
  2203 						TRACE2(("DPager::FlushRegion: paged out %08x", physAddr));
       
  2204 						}
       
  2205 					else
       
  2206 						TRACE2(("DPager::FlushRegion: page out %08x failed with %d", physAddr, r));
       
  2207 					}
       
  2208 				}
       
  2209 			}
       
  2210 		
       
  2211 		MmuLock::Flash();
       
  2212 		++index;
       
  2213 		}
       
  2214 	
       
  2215 	MmuLock::Unlock();
       
  2216 	PageCleaningLock::Unlock();
       
  2217 	RamAllocLock::Unlock();
       
  2218 	mapping->Close();
       
  2219 	NKern::ThreadLeaveCS();
       
  2220 	TRACE2(("DPager::FlushRegion: done"));
       
  2221 	return KErrNone;
  2126 	}
  2222 	}
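
A hedged sketch of how a caller might drive the new FlushRegion() (hypothetical wrapper; the three-page size and parameter names are assumptions for illustration):

	// Evict a small region belonging to aProcess from the paging cache.
	TInt EvictRegion(DMemModelProcess* aProcess, TLinAddr aBase)
		{
		return ThePager.FlushRegion(aProcess, aBase, 3*KPageSize);
		}

Note that KErrBadDescriptor is returned only when no mapping covers the range; pages that cannot be stolen are traced and skipped, so KErrNone does not guarantee every page was evicted.
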
  2127 
  2223 
  2128 
  2224 
  2129 void DPager::GetLiveListInfo(SVMCacheInfo& aInfo)
  2225 void DPager::GetLiveListInfo(SVMCacheInfo& aInfo)
  2130 	{
  2226 	{
  2835 
  2931 
  2836 
  2932 
  2837 EXPORT_C TInt DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
  2933 EXPORT_C TInt DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
  2838 	{
  2934 	{
  2839 //	TRACEP(("DDemandPagingLock[0x%08x]::Lock(0x%08x,0x%08x,0x%08x)",this,aThread,aStart,aSize));
  2935 //	TRACEP(("DDemandPagingLock[0x%08x]::Lock(0x%08x,0x%08x,0x%08x)",this,aThread,aStart,aSize));
  2840 	if(iLockedPageCount)
  2936 	__NK_ASSERT_ALWAYS(!iLockedPageCount); // lock already used
  2841 		__NK_ASSERT_ALWAYS(0); // lock already used
       
  2842 
  2937 
  2843 	// calculate the number of pages that need to be locked...
  2938 	// calculate the number of pages that need to be locked...
  2844 	TUint mask=KPageMask;
  2939 	TUint mask=KPageMask;
  2845 	TUint offset=aStart&mask;
  2940 	TUint offset=aStart&mask;
  2846 	TInt numPages = (aSize+offset+mask)>>KPageShift;
  2941 	TInt numPages = (aSize+offset+mask)>>KPageShift;
  2847 	if(numPages>iMaxPageCount)
  2942 
  2848 		__NK_ASSERT_ALWAYS(0);
  2943 	// Should never be asked to lock more pages than are allocated to this object.
       
  2944 	__NK_ASSERT_ALWAYS(numPages <= iMaxPageCount);
  2849 
  2945 
  2850 	NKern::ThreadEnterCS();
  2946 	NKern::ThreadEnterCS();
  2851 
  2947 
  2852 	// find mapping which covers the specified region...
  2948 	// find mapping which covers the specified region...
  2853 	TUint offsetInMapping;
  2949 	TUint offsetInMapping;
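
For reference, the page-count computation above is a standard round-up of the byte range to whole pages. A worked example, assuming 4K pages (KPageShift == 12, KPageMask == 0xfff):

	// aStart = 0x1001  ->  offset = 0x001
	// aSize  = 0x2000  (8K)
	// numPages = (0x2000 + 0x001 + 0xfff) >> 12 = 0x3000 >> 12 = 3
	// i.e. an 8K transfer starting one byte into a page touches 3 pages.
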