kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp
branch RCL_3
changeset 110 c734af59ce98
parent 97 41f0cfe18c80
child 117 5b5d147c7838

--- a/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp	(parent 97:41f0cfe18c80)
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp	(changeset 110:c734af59ce98)
@@ -25,38 +25,44 @@
 #include "mmanager.h"
 #include "mptalloc.h"
 #include "mpagearray.h"
 #include "mswap.h"
 #include "mthrash.h"
+#include "mpagecleaner.h"
+
 #include "cache_maintenance.inl"
 
 
 const TUint16 KDefaultYoungOldRatio = 3;
 const TUint16 KDefaultMinPages = 256;
-#ifdef _USE_OLDEST_LISTS
 const TUint16 KDefaultOldOldestRatio = 3;
-#endif
 
 const TUint KMinOldPages = 1;
 
 /*	On a 32 bit system without PAE can't have more than 2^(32-KPageShift) pages.
  *	Subtract 1 so it doesn't overflow when converted to bytes.
 */
 const TUint	KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u;
 
-
+/*
+Limit the maximum number of oldest pages to bound the time taken by SelectPagesToClean(), which is
+called with the MmuLock held.
+*/
+const TUint KMaxOldestPages = 32;
+
+static DMutex* ThePageCleaningLock = NULL;
 
 DPager ThePager;
 
 
 DPager::DPager()
 	: iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0),
-	  iYoungCount(0),iOldCount(0),
-#ifdef _USE_OLDEST_LISTS
-	  iOldestCleanCount(0),
-#endif
+	  iYoungCount(0), iOldCount(0), iOldestCleanCount(0),
 	  iNumberOfFreePages(0), iReservePageCount(0), iMinimumPageLimit(0)
+#ifdef __DEMAND_PAGING_BENCHMARKS__
+	, iBenchmarkLock(TSpinLock::EOrderGenericIrqHigh3)
+#endif	  
 	{
 	}
 
 
 void DPager::InitCache()
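
[Reviewer note, not part of the changeset: this hunk makes the four-list LRU
(young / old / oldest-clean / oldest-dirty) unconditional by dropping the
_USE_OLDEST_LISTS guards, and introduces ThePageCleaningLock plus the
KMaxOldestPages bound consumed by SelectPagesToClean() further down.  The
PageCleaningLock wrapper used throughout the new code is defined elsewhere;
a minimal sketch of the interface this diff relies on, with the DMutex
mapping being an assumption:

	class PageCleaningLock
		{
	public:
		static void Lock();     // presumably Kern::MutexWait(*ThePageCleaningLock)
		static void Unlock();   // presumably Kern::MutexSignal(*ThePageCleaningLock)
		static TBool IsHeld();  // used in assertions and TryStealOldestPage()
		static void Init();     // creates the mutex; called from Init3() below
		};
]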
@@ -100,17 +106,12 @@
 #error Unknown CPU
 #endif
 
 #ifdef __SMP__
 	// Adjust min page count so that all CPUs are guaranteed to make progress.
-	// NOTE: Can't use NKern::NumberOfCpus here because we haven't finished booting yet and will
-	// always have only one CPU running at this point...
-
-	// TODO: Before we can enable this the base test configuration needs
-	// updating to have a sufficient minimum page size...
-	//
-	// iMinYoungPages *= KMaxCpus;
+	TInt numberOfCpus = NKern::NumberOfCpus();
+	iMinYoungPages *= numberOfCpus;
 #endif
 
 	// A minimum young/old ratio of 1 means that we need at least twice iMinYoungPages pages...
 	iAbsoluteMinPageCount = 2*iMinYoungPages;
 
@@ -121,15 +122,13 @@
 
 	// Set the list ratios...
 	iYoungOldRatio = KDefaultYoungOldRatio;
 	if(config.iYoungOldRatio)
 		iYoungOldRatio = config.iYoungOldRatio;
-#ifdef _USE_OLDEST_LISTS
 	iOldOldestRatio = KDefaultOldOldestRatio;
 	if(config.iSpare[2])
 		iOldOldestRatio = config.iSpare[2];
-#endif
 
 	// Set the minimum page counts...
 	iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
 									   + DPageReadRequest::ReservedPagesRequired();
 	
@@ -159,11 +158,10 @@
 		iMaximumPageCount = config.iMaxPages;
 	if (iMaximumPageCount > KAbsoluteMaxPageCount)
 		iMaximumPageCount = KAbsoluteMaxPageCount;
 	iInitMaximumPageCount = iMaximumPageCount;
 
-
 	TRACEB(("DPager::InitCache() live list min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio));
 
 	// Verify the page counts are valid.
 	__NK_ASSERT_ALWAYS(iMaximumPageCount >= iMinimumPageCount);
 	TUint minOldAndOldest = iMinimumPageCount / (1 + iYoungOldRatio);
@@ -177,31 +175,25 @@
 	// Verify that the young old ratio can be met even when there is only the 
 	// minimum number of old pages.
 	TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages;
 	__NK_ASSERT_ALWAYS(iYoungOldRatio <= ratioLimit);
 
-#ifdef _USE_OLDEST_LISTS
 	// There should always be enough old pages to allow the oldest lists ratio.
 	TUint oldestCount = minOldAndOldest / (1 + iOldOldestRatio);
 	__NK_ASSERT_ALWAYS(oldestCount);
-#endif
 
 	iNumberOfFreePages = 0;
 	iNumberOfDirtyPages = 0;
 
 	// Allocate RAM pages and put them all on the old list.
 	// Reserved pages have already been allocated and already placed on the
 	// old list so don't allocate them again.
 	RamAllocLock::Lock();
 	iYoungCount = 0;
-#ifdef _USE_OLDEST_LISTS
 	iOldCount = 0;
 	iOldestDirtyCount = 0;
 	__NK_ASSERT_DEBUG(iOldestCleanCount == iReservePageCount);
-#else
-	__NK_ASSERT_DEBUG(iOldCount == iReservePageCount);
-#endif
 	Mmu& m = TheMmu;
 	for(TUint i = iReservePageCount; i < iMinimumPageCount; i++)
 		{
 		// Allocate a single page
 		TPhysAddr pagePhys;
@@ -214,15 +206,11 @@
 		MmuLock::Unlock();
 		}
 	RamAllocLock::Unlock();
 
 	__NK_ASSERT_DEBUG(CacheInitialised());
-#ifdef _USE_OLDEST_LISTS
 	TRACEB(("DPager::InitCache() end with young=%d old=%d oldClean=%d oldDirty=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
-#else
-	TRACEB(("DPager::InitCache() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
-#endif
 	}
 
 
 #ifdef _DEBUG
 #ifdef FMM_PAGER_CHECK_LISTS
@@ -248,38 +236,27 @@
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 	if (!CheckList(&iOldList.iA, iOldCount))
 		return EFalse;
 	if (!CheckList(&iYoungList.iA, iYoungCount))
 		return EFalse;
-
-#ifdef _USE_OLDEST_LISTS
 	if (!CheckList(&iOldestCleanList.iA, iOldestCleanCount))
 		return EFalse;
 	if (!CheckList(&iOldestDirtyList.iA, iOldestDirtyCount))
 		return EFalse;
 	TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d", iYoungCount, iOldCount, 
 			iOldestCleanCount, iOldestDirtyCount, iNumberOfFreePages));
-#else
-	TRACEP(("DP: y=%d o=%d f=%d", iYoungCount, iOldCount, iNumberOfFreePages));
-#endif //#ifdef _USE_OLDEST_LISTS
 	TraceCounts();
 #endif // #ifdef FMM_PAGER_CHECK_LISTS
 	return true;
 	}
 
 void DPager::TraceCounts()
 	{
-#ifdef _USE_OLDEST_LISTS
 	TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d min=%d max=%d ml=%d res=%d",
 		iYoungCount, iOldCount, iOldestCleanCount, iOldestDirtyCount, 
 		iNumberOfFreePages, iMinimumPageCount, iMaximumPageCount,
 		iMinimumPageLimit, iReservePageCount));
-#else
-	TRACEP(("DP: y=%d o=%d f=%d min=%d max=%d ml=%d res=%d",
-		iYoungCount, iOldCount, iNumberOfFreePages, iMinimumPageCount,
-		iMaximumPageCount, iMinimumPageLimit, iReservePageCount));
-#endif //#ifdef _USE_OLDEST_LISTS
 	}
 #endif //#ifdef _DEBUG
 
 
 TBool DPager::HaveTooManyPages()
@@ -318,19 +295,13 @@
 	__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);
 	TheMmu.PageFreed(aPageInfo);
 	__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);
 
 	// add as oldest page...
-#ifdef _USE_OLDEST_LISTS
 	aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
 	iOldestCleanList.Add(&aPageInfo->iLink);
 	++iOldestCleanCount;
-#else
-	aPageInfo->SetPagedState(SPageInfo::EPagedOld);
-	iOldList.Add(&aPageInfo->iLink);
-	++iOldCount;
-#endif
 
 	Event(EEventPageInFree,aPageInfo);
 	}
 
 
@@ -355,23 +326,21 @@
 		__NK_ASSERT_DEBUG(iOldCount);
 		aPageInfo->iLink.Deque();
 		--iOldCount;
 		break;
 
-#ifdef _USE_OLDEST_LISTS
 	case SPageInfo::EPagedOldestClean:
 		__NK_ASSERT_DEBUG(iOldestCleanCount);
 		aPageInfo->iLink.Deque();
 		--iOldestCleanCount;
 		break;
 
 	case SPageInfo::EPagedOldestDirty:
 		__NK_ASSERT_DEBUG(iOldestDirtyCount);
 		aPageInfo->iLink.Deque();
 		--iOldestDirtyCount;
 		break;
-#endif
 
 	case SPageInfo::EPagedPinned:
 		// this can occur if a pinned mapping is being unmapped when memory is decommitted.
 		// the decommit will have succeeded because the the mapping no longer vetoes this,
 		// however the unpinning hasn't yet got around to changing the page state.
@@ -390,30 +359,27 @@
 		return KErrNotFound;
 		}
 
 	// Update the dirty page count as required...
 	if (aPageInfo->IsDirty())
+		{
+		aPageInfo->SetReadOnly();
 		SetClean(*aPageInfo);
+		}
 
 	if (iNumberOfFreePages > 0)
 		{// The paging cache is not at the minimum size so safe to let the 
 		// ram allocator free this page.
 		iNumberOfFreePages--;
 		aPageInfo->SetPagedState(SPageInfo::EUnpaged);
 		return KErrCompletion;
 		}
 	// Need to hold onto this page as have reached the page cache limit.
 	// add as oldest page...
-#ifdef _USE_OLDEST_LISTS
 	aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
 	iOldestCleanList.Add(&aPageInfo->iLink);
 	++iOldestCleanCount;
-#else
-	aPageInfo->SetPagedState(SPageInfo::EPagedOld);
-	iOldList.Add(&aPageInfo->iLink);
-	++iOldCount;
-#endif
 
 	return KErrNone;
 	}
 
 
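
[Reviewer note, not part of the changeset: the dirty-page handling above now
restricts the page before accounting it clean.  The ordering matters: once a
page is no longer counted dirty, a still-writable mapping could modify it
without faulting, so the PTEs are made read-only first and any later write
re-faults and marks the page dirty again.  The invariant, restated:

	if (aPageInfo->IsDirty())
		{
		aPageInfo->SetReadOnly();   // restrict writable mappings first...
		SetClean(*aPageInfo);       // ...then account the page as clean
		}
]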
@@ -436,23 +402,21 @@
 		__NK_ASSERT_DEBUG(iOldCount);
 		aPageInfo->iLink.Deque();
 		--iOldCount;
 		break;
 
-#ifdef _USE_OLDEST_LISTS
 	case SPageInfo::EPagedOldestClean:
 		__NK_ASSERT_DEBUG(iOldestCleanCount);
 		aPageInfo->iLink.Deque();
 		--iOldestCleanCount;
 		break;
 
 	case SPageInfo::EPagedOldestDirty:
 		__NK_ASSERT_DEBUG(iOldestDirtyCount);
 		aPageInfo->iLink.Deque();
 		--iOldestDirtyCount;
 		break;
-#endif
 
 	case SPageInfo::EPagedPinned:
 		__NK_ASSERT_DEBUG(0);
 	case SPageInfo::EUnpaged:
 #ifdef _DEBUG
@@ -519,56 +483,257 @@
 			break;
 		}	
 	}
 
 
-SPageInfo* DPager::StealOldestPage()
+TInt DPager::TryStealOldestPage(SPageInfo*& aPageInfoOut)
 	{
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 
+	// find oldest page in list...
+	SDblQueLink* link;
+	if (iOldestCleanCount)
+		{
+		__NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty());
+		link = iOldestCleanList.Last();
+		}
+	else if (iOldestDirtyCount)
+		{
+		__NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
+		link = iOldestDirtyList.Last();
+		}
+	else if (iOldCount)
+		{
+		__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
+		link = iOldList.Last();
+		}
+	else
+		{
+		__NK_ASSERT_DEBUG(iYoungCount);
+		__NK_ASSERT_ALWAYS(!iYoungList.IsEmpty());
+		link = iYoungList.Last();
+		}
+	SPageInfo* pageInfo = SPageInfo::FromLink(link);
+
+	if (pageInfo->IsDirty() && !PageCleaningLock::IsHeld())
+		return 1;
+
+	// try to steal it from owning object...
+	TInt r = StealPage(pageInfo);	
+	if (r == KErrNone)
+		{
+		BalanceAges();
+		aPageInfoOut = pageInfo;
+		}
+	
+	return r;
+	}
+
+
+SPageInfo* DPager::StealOldestPage()
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	TBool pageCleaningLockHeld = EFalse;
 	for(;;)
 		{
-		// find oldest page in list...
-		SDblQueLink* link;
-#ifdef _USE_OLDEST_LISTS
-		if (iOldestCleanCount)
+		SPageInfo* pageInfo = NULL;
+		TInt r = TryStealOldestPage(pageInfo);
+		
+		if (r == KErrNone)
 			{
-			__NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty());
-			link = iOldestCleanList.Last();
-			}
-		else if (iOldestDirtyCount)
-			{
-			__NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
-			link = iOldestDirtyList.Last();
+			if (pageCleaningLockHeld)
+				{
+				MmuLock::Unlock();
+				PageCleaningLock::Unlock();
+				MmuLock::Lock();
+				}
+			return pageInfo;
 			}
-		else if (iOldCount)
+		else if (r == 1)
+			{
+			__NK_ASSERT_ALWAYS(!pageCleaningLockHeld);
+			MmuLock::Unlock();
+			PageCleaningLock::Lock();
+			MmuLock::Lock();
+			pageCleaningLockHeld = ETrue;
+			}
+		// else retry...
+		}
+	}
+
+#ifdef __CPU_CACHE_HAS_COLOUR
+
+template <class T, TInt maxObjects> class TSequentialColourSelector
+	{
+public:
+	static const TInt KMaxLength = maxObjects;
+	static const TInt KArrayLength = _ALIGN_UP(KMaxLength, KPageColourCount);
+	
+	FORCE_INLINE TSequentialColourSelector()
+		{
+		memclr(this, sizeof(*this));
+		}
+
+	FORCE_INLINE TBool FoundLongestSequence()
+		{
+		return iLongestLength >= KMaxLength;
+		}
+
+	FORCE_INLINE void AddCandidate(T* aObject, TInt aColour)
+		{
+		// allocate objects to slots based on colour
+		for (TInt i = aColour ; i < KArrayLength ; i += KPageColourCount)
+			{
+			if (!iSlot[i])
+				{
+				iSlot[i] = aObject;
+				iSeqLength[i] = i == 0 ? 1 : iSeqLength[i - 1] + 1;
+				TInt j = i + 1;
+				while(j < KArrayLength && iSeqLength[j])
+					iSeqLength[j++] += iSeqLength[i];
+				TInt currentLength = iSeqLength[j - 1];
+				if (currentLength > iLongestLength)
+					{
+					iLongestLength = currentLength;
+					iLongestStart = j - currentLength;
+					}
+				break;
+				}
+			}
+		}
+
+	FORCE_INLINE TInt FindLongestRun(T** aObjectsOut)
+		{
+		if (iLongestLength == 0)
+			return 0;
+
+		if (iLongestLength < KMaxLength && iSlot[0] && iSlot[KArrayLength - 1])
+			{
+			// check possibility of wrapping
+
+			TInt i = 1;
+			while (iSlot[i]) ++i;  // find first hole
+			TInt wrappedLength = iSeqLength[KArrayLength - 1] + iSeqLength[i - 1];
+			if (wrappedLength > iLongestLength)
+				{
+				iLongestLength = wrappedLength;
+				iLongestStart = KArrayLength - iSeqLength[KArrayLength - 1];
+				}
+			}		
+
+		iLongestLength = Min(iLongestLength, KMaxLength);
+
+		__NK_ASSERT_DEBUG(iLongestStart >= 0 && iLongestStart < KArrayLength);
+		__NK_ASSERT_DEBUG(iLongestStart + iLongestLength < 2 * KArrayLength);
+
+		TInt len = Min(iLongestLength, KArrayLength - iLongestStart);
+		wordmove(aObjectsOut, &iSlot[iLongestStart], len * sizeof(T*));
+		wordmove(aObjectsOut + len, &iSlot[0], (iLongestLength - len) * sizeof(T*));
+		
+		return iLongestLength;
+		}
+
+private:
+	T* iSlot[KArrayLength];
+	TInt8 iSeqLength[KArrayLength];
+	TInt iLongestStart;
+	TInt iLongestLength;
+	};
+
+TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut)
+	{
+	// select up to KMaxPagesToClean oldest dirty pages with sequential page colours
+	
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+
+	TSequentialColourSelector<SPageInfo, KMaxPagesToClean> selector;
+
+	SDblQueLink* link = iOldestDirtyList.Last();
+	while (link != &iOldestDirtyList.iA)
+		{
+		SPageInfo* pi = SPageInfo::FromLink(link);
+		if (!pi->IsWritable())  
+			{
+			// the page may be in the process of being restricted, stolen or decommitted, but don't
+			// check for this as it will occur infrequently and will be detected by CheckModified
+			// anyway
+			TInt colour = pi->Index() & KPageColourMask;
+			selector.AddCandidate(pi, colour);
+			if (selector.FoundLongestSequence())
+				break;
+			}
+		link = link->iPrev;
+		}
+	
+	return selector.FindLongestRun(aPageInfosOut);
+	}
+
 #else
-			{
-			__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
-			link = iOldList.Last();
-			}
-		else
-			{
-			__NK_ASSERT_DEBUG(iYoungCount);
-			__NK_ASSERT_ALWAYS(!iYoungList.IsEmpty());
-			link = iYoungList.Last();
-			}
-		SPageInfo* pageInfo = SPageInfo::FromLink(link);
-
-		// steal it from owning object...
-		TInt r = StealPage(pageInfo);
-
-		BalanceAges();
-
-		if(r==KErrNone)
-			return pageInfo; // done
-
-		// loop back and try again
-		}
+
+TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut)
+	{
+	// no page colouring restrictions, so just take up to KMaxPagesToClean oldest dirty pages
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	TInt pageCount = 0;
+	SDblQueLink* link = iOldestDirtyList.Last();
+	while (link != &iOldestDirtyList.iA && pageCount < KMaxPagesToClean)
+		{
+		SPageInfo* pi = SPageInfo::FromLink(link);
+		if (!pi->IsWritable())
+			{
+			// the page may be in the process of being restricted, stolen or decommitted, but don't
+			// check for this as it will occur infrequently and will be detected by CheckModified
+			// anyway
+			aPageInfosOut[pageCount++] = pi;
+			}
+		link = link->iPrev;
+		}
+	return pageCount;
+	}
+
 #endif
+
+
+TInt DPager::CleanSomePages(TBool aBackground)
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
+	// ram alloc lock may or may not be held
+
+	SPageInfo* pageInfos[KMaxPagesToClean];
+	TInt pageCount = SelectPagesToClean(&pageInfos[0]);
+	
+	if (pageCount == 0)
+		return 0;
+	
+	TheDataPagedMemoryManager->CleanPages(pageCount, pageInfos, aBackground);
+
+	for (TInt i = 0 ; i < pageCount ; ++i)
+		{
+		SPageInfo* pi = pageInfos[i];
+		if (pi)
+			{
+			__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EPagedOldestDirty && iOldestDirtyCount);
+			__NK_ASSERT_DEBUG(!pi->IsDirty() && !pi->IsWritable());
+		
+			pi->iLink.Deque();
+			iOldestCleanList.AddHead(&pi->iLink);
+			--iOldestDirtyCount;
+			++iOldestCleanCount;
+			pi->SetPagedState(SPageInfo::EPagedOldestClean);
+			}
+		}
+
+	return pageCount;
+	}
+
+
+TBool DPager::HasPagesToClean()
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	return iOldestDirtyCount > 0;
 	}
 
 
 TInt DPager::RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
 	{
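
[Reviewer note, not part of the changeset: TSequentialColourSelector picks a
run of oldest dirty pages whose cache colours are sequential so the cleaner
can map them at consecutive virtual addresses and write them out in a single
operation.  A hypothetical trace, assuming KPageColourCount == 4 and
KMaxPagesToClean == 4 (both defined outside this file):

	TSequentialColourSelector<SPageInfo, 4> selector;
	selector.AddCandidate(pi1, 1);          // slots [-,1,-,-], longest run 1
	selector.AddCandidate(pi2, 2);          // slots [-,1,2,-], longest run 2
	selector.AddCandidate(pi0, 0);          // slots [0,1,2,-], longest run 3
	selector.AddCandidate(pi3, 3);          // slots [0,1,2,3], longest run 4
	SPageInfo* run[4];
	TInt n = selector.FindLongestRun(run);  // n == 4; run[] ordered by colour 0..3

TryStealOldestPage()'s return value of 1 is a third state alongside KErrNone
and the negative error codes: it means "the oldest page is dirty and you do
not hold the page cleaning mutex", which callers answer by dropping the
MmuLock, taking PageCleaningLock, re-taking MmuLock, and retrying.]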
@@ -645,101 +810,162 @@
 	TRACE(("DPager::StealPage returns %d",r));
 	return r;
 	}
 
 
+TInt DPager::DiscardAndAllocPage(SPageInfo* aPageInfo, TZonePageType aPageType)
+	{
+	TInt r = DiscardPage(aPageInfo, KRamZoneInvalidId, EFalse);
+	if (r == KErrNone)
+		{
+		TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType);
+		}
+	// Flash the ram alloc lock as we may have had to write a page out to swap.
+	RamAllocLock::Unlock();
+	RamAllocLock::Lock();
+	return r;
+	}
+
+
+static TBool DiscardCanStealPage(SPageInfo* aOldPageInfo, TBool aBlockRest)
+	{
+ 	// If the page is pinned or if the page is dirty and a general defrag is being performed then
+	// don't attempt to steal it
+	return aOldPageInfo->Type() == SPageInfo::EUnused ||
+		(aOldPageInfo->PagedState() != SPageInfo::EPagedPinned && (!aBlockRest || !aOldPageInfo->IsDirty()));	
+	}
+
+
 TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest)
 	{
+	// todo: assert MmuLock not released
+	
+	TRACE(("> DPager::DiscardPage %08x", aOldPageInfo));
+	
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 
-	TInt r;
-	// If the page is pinned or if the page is dirty and a general defrag is being 
-	// performed then don't attempt to steal it.
-	if (aOldPageInfo->Type() != SPageInfo::EUnused && 
-		(aOldPageInfo->PagedState() == SPageInfo::EPagedPinned ||
-		(aBlockRest && aOldPageInfo->IsDirty())))
-		{// The page is pinned or is dirty and this is a general defrag so move the page.
+	if (!DiscardCanStealPage(aOldPageInfo, aBlockRest))
+		{
+		// The page is pinned or is dirty and this is a general defrag so move the page.
 		DMemoryObject* memory = aOldPageInfo->Owner();
 		// Page must be managed if it is pinned or dirty.
 		__NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged);
 		__NK_ASSERT_DEBUG(memory);
 		MmuLock::Unlock();
 		TPhysAddr newAddr;
-		return memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest);
-		}
-
-	if (!iNumberOfFreePages)
-		{
-		// Allocate a new page for the live list as it has reached its minimum size.
+		TRACE2(("DPager::DiscardPage delegating pinned/dirty page to manager"));
+		TInt r = memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest);
+		TRACE(("< DPager::DiscardPage %d", r));
+		return r;
+		}
+
+	TInt r = KErrNone;
+	SPageInfo* newPageInfo = NULL;
+	TBool havePageCleaningLock = EFalse;
+
+	TBool needNewPage;
+	TBool needPageCleaningLock;
+	while(needNewPage = (iNumberOfFreePages == 0 && newPageInfo == NULL),
+		  needPageCleaningLock = (aOldPageInfo->IsDirty() && !havePageCleaningLock),
+		  needNewPage || needPageCleaningLock)
+		{
 		MmuLock::Unlock();
-		SPageInfo* newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe),
-													aBlockZoneId, aBlockRest);
-		 if (!newPageInfo)
-			return KErrNoMemory;
+
+		if (needNewPage)
+			{
+			// Allocate a new page for the live list as it has reached its minimum size.
+			TUint flags = EMemAttNormalCached | Mmu::EAllocNoWipe;
+			newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)flags, aBlockZoneId, aBlockRest);
+			if (!newPageInfo)
+				{
+				TRACE(("< DPager::DiscardPage KErrNoMemory"));
+				r = KErrNoMemory;
+				MmuLock::Lock();
+				break;
+				}
+			}
+
+		if (needPageCleaningLock)
+			{
+			// Acquire the page cleaning mutex so StealPage can clean it
+			PageCleaningLock::Lock();
+			havePageCleaningLock = ETrue;
+			}
 
 		// Re-acquire the mmulock and re-check that the page is not pinned or dirty.
 		MmuLock::Lock();
-		if (aOldPageInfo->Type() != SPageInfo::EUnused && 
-			(aOldPageInfo->PagedState() == SPageInfo::EPagedPinned ||
-			(aBlockRest && aOldPageInfo->IsDirty())))
-			{// Page is now pinned or dirty so give up as it is inuse.
-			ReturnPageToSystem(*newPageInfo);
-			MmuLock::Unlock();
-			return KErrInUse;
-			}
-
+		if (!DiscardCanStealPage(aOldPageInfo, aBlockRest))
+			{
+			// Page is now pinned or dirty so give up as it is in use.
+			r = KErrInUse;
+			break;
+			}
+		}
+
+	if (r == KErrNone)
+		{
 		// Attempt to steal the page
-		r = StealPage(aOldPageInfo);
-		__NK_ASSERT_DEBUG(MmuLock::IsHeld());
-
-		if (r == KErrCompletion)
-			{// This was a page table that has been freed but added to the 
-			// live list as a free page.  Remove from live list and continue.
-			__NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
-			RemovePage(aOldPageInfo);
-			r = KErrNone;
-			}
-
-		if (r == KErrNone)
-			{// Add the new page to the live list as discarding the old page 
-			// will reduce the live list below the minimum.
+		r = StealPage(aOldPageInfo);  // temporarily releases MmuLock if page is dirty
+		}
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+
+	if (r == KErrCompletion)
+		{// This was a page table that has been freed but added to the 
+		// live list as a free page.  Remove from live list and continue.
+		__NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
+		RemovePage(aOldPageInfo);
+		r = KErrNone;
+		}
+
+	if (r == KErrNone && iNumberOfFreePages == 0)
+		{
+		if (newPageInfo)
+			{
+			// Add a new page to the live list if we have one as discarding the old page will reduce
+			// the live list below the minimum.
 			AddAsFreePage(newPageInfo);
-			// We've successfully discarded the page so return it to the free pool.
-			ReturnPageToSystem(*aOldPageInfo);
-			BalanceAges();
-			}
-		 else
-			{
-			// New page not required so just return it to the system.  This is safe as 
-			// iNumberOfFreePages will have this page counted but as it is not on the live list
-			// noone else can touch it.
-			ReturnPageToSystem(*newPageInfo);
-			}
-		}
-	else
-		{
-		// Attempt to steal the page
-		r = StealPage(aOldPageInfo);
-
-		__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+			newPageInfo = NULL;
+			}
+		else
+			{
+			// Otherwise the live list shrank when page was being cleaned so have to give up
+			AddAsFreePage(aOldPageInfo);
+			BalanceAges();                  // temporarily releases MmuLock
+			r = KErrInUse;
+			}
+		}
 
-		if (r == KErrCompletion)
-			{// This was a page table that has been freed but added to the 
-			// live list as a free page.  Remove from live list.
-			__NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
-			RemovePage(aOldPageInfo);
-			r = KErrNone;
-			}
-
-		if (r == KErrNone)
-			{// We've successfully discarded the page so return it to the free pool.
-			ReturnPageToSystem(*aOldPageInfo);
-			BalanceAges();
-			}
-		}
+	if (r == KErrNone)
+		{
+		// We've successfully discarded the page and ensured the live list is large enough, so
+		// return it to the free pool.
+		ReturnPageToSystem(*aOldPageInfo);  // temporarily releases MmuLock
+		BalanceAges();                      // temporarily releases MmuLock
+		}
+
+	if (newPageInfo)
+		{
+		// New page not required so just return it to the system.  This is safe as
+		// iNumberOfFreePages will have this page counted but as it is not on the live list noone
+		// else can touch it.
+		if (iNumberOfFreePages == 0)
+			AddAsFreePage(newPageInfo);
+		else
+			ReturnPageToSystem(*newPageInfo);   // temporarily releases MmuLock
+		}
+
+	if (havePageCleaningLock)
+		{
+		// Release the page cleaning mutex
+		MmuLock::Unlock();
+		PageCleaningLock::Unlock();
+		MmuLock::Lock();
+		}	
+	
 	MmuLock::Unlock();
+	TRACE(("< DPager::DiscardPage returns %d", r));
 	return r;	
 	}
 
 
 TBool DPager::TryGrowLiveList()
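
[Reviewer note, not part of the changeset: DiscardPage() now gathers
everything it may block on (a replacement page, the page cleaning mutex)
inside a loop that drops the MmuLock to wait, and therefore must re-validate
the victim page each time round.  The pattern, distilled with hypothetical
helper names:

	while (NeedsResources())                 // needNewPage || needPageCleaningLock
		{
		MmuLock::Unlock();
		AcquireOne();                        // GetPageFromSystem() or PageCleaningLock::Lock()
		MmuLock::Lock();
		if (!DiscardCanStealPage(aOldPageInfo, aBlockRest))
			{
			r = KErrInUse;                   // page was pinned or dirtied while unlocked
			break;
			}
		}
]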
@@ -791,10 +1017,13 @@
 void DPager::ReturnPageToSystem(SPageInfo& aPageInfo)
 	{
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 
+	// should be unpaged at this point, otherwise Mmu::FreeRam will just give it back to us
+	__NK_ASSERT_DEBUG(aPageInfo.PagedState() == SPageInfo::EUnpaged);
+
 	__NK_ASSERT_DEBUG(iNumberOfFreePages>0);
 	--iNumberOfFreePages;
 
 	// The page must be unpaged, otherwise it wasn't successfully removed 
 	// from the live list.
@@ -808,59 +1037,104 @@
 	}
 
 
 SPageInfo* DPager::PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags)
 	{
+	TBool pageCleaningLockHeld = EFalse;
 	SPageInfo* pageInfo;
 	TPhysAddr pagePhys;
-
+	TInt r = KErrGeneral;
+	
 	RamAllocLock::Lock();
 	MmuLock::Lock();
 
+find_a_page:
 	// try getting a free page from our live list...
-#ifdef _USE_OLDEST_LISTS
 	if (iOldestCleanCount)
 		{
 		pageInfo = SPageInfo::FromLink(iOldestCleanList.Last());
 		if(pageInfo->Type()==SPageInfo::EUnused)
-			goto get_oldest;
+			goto try_steal_oldest_page;
 		}
-#else
-	if(iOldCount)
-		{
-		pageInfo = SPageInfo::FromLink(iOldList.Last());
-		if(pageInfo->Type()==SPageInfo::EUnused)
-			goto get_oldest;
-		}
-#endif
 
 	// try getting a free page from the system pool...
 	if(!HaveMaximumPages())
 		{
 		MmuLock::Unlock();
 		pageInfo = GetPageFromSystem(aAllocFlags);
 		if(pageInfo)
 			goto done;
 		MmuLock::Lock();
 		}
 
+	// try stealing a clean page...
+	if (iOldestCleanCount)
+		goto try_steal_oldest_page;
+
+	// see if we can clean multiple dirty pages in one go...
+	if (KMaxPagesToClean > 1 && iOldestDirtyCount > 1)
+		{
+		// if we don't hold the page cleaning mutex then temporarily release ram alloc mutex and
+		// acquire page cleaning mutex; if we hold it already just proceed
+		if (!pageCleaningLockHeld)
+			{
+			MmuLock::Unlock();
+			RamAllocLock::Unlock();
+			PageCleaningLock::Lock();			
+			MmuLock::Lock();
+			}
+		
+		// there may be clean pages now if we've waited on the page cleaning mutex, if so don't
+		// bother cleaning but just restart
+		if (iOldestCleanCount == 0)
+			CleanSomePages(EFalse);
+		
+		if (!pageCleaningLockHeld)
+			{
+			MmuLock::Unlock();
+			PageCleaningLock::Unlock();			
+			RamAllocLock::Lock();
+			MmuLock::Lock();
+			}
+		
+		if (iOldestCleanCount > 0)
+			goto find_a_page;
+		}
+
 	// as a last resort, steal a page from the live list...
-get_oldest:
-#ifdef _USE_OLDEST_LISTS
+	
+try_steal_oldest_page:
 	__NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount);
-#else
-	__NK_ASSERT_ALWAYS(iOldCount|iYoungCount);
-#endif
-	pageInfo = StealOldestPage();
+	r = TryStealOldestPage(pageInfo);
+	// if this fails we restart whole process
+	if (r < KErrNone)
+		goto find_a_page;
+
+	// if we need to clean, acquire page cleaning mutex for life of this function
+	if (r == 1)
+		{
+		__NK_ASSERT_ALWAYS(!pageCleaningLockHeld);
+		MmuLock::Unlock();
+		PageCleaningLock::Lock();
+		MmuLock::Lock();
+		pageCleaningLockHeld = ETrue;
+		goto find_a_page;		
+		}
+
+	// otherwise we're done!
+	__NK_ASSERT_DEBUG(r == KErrNone);
 	MmuLock::Unlock();
 
 	// make page state same as a freshly allocated page...
 	pagePhys = pageInfo->PhysAddr();
 	TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags);
 
 done:
+	if (pageCleaningLockHeld)
+		PageCleaningLock::Unlock();
 	RamAllocLock::Unlock();
+
 	return pageInfo;
 	}
 
 
 TBool DPager::GetFreePages(TInt aNumPages)
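
[Reviewer note, not part of the changeset: the lock order implied by this
hunk and by DiscardPage() is RamAllocLock, then PageCleaningLock, then
MmuLock, and neither mutex is ever waited on with the MmuLock held.  The two
dances in the batched-cleaning path above, in sequence:

	MmuLock::Unlock();            // never wait on a DMutex with MmuLock held
	RamAllocLock::Unlock();       // also drop the outer lock to cut contention
	PageCleaningLock::Lock();     // may block while another thread cleans
	MmuLock::Lock();              // state may have changed; code re-checks counts

	MmuLock::Unlock();            // on the way out: release the inner lock,
	PageCleaningLock::Unlock();   // release the cleaning mutex,
	RamAllocLock::Lock();         // re-take the outer lock before
	MmuLock::Lock();              // re-taking the innermost one
]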
@@ -913,14 +1187,12 @@
 			TheMmu.ChangePageType(pi, EPageMovable, EPageDiscard);
 			break;
 
 		case SPageInfo::EPagedYoung:
 		case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
 		case SPageInfo::EPagedOldestDirty:
 		case SPageInfo::EPagedOldestClean:
-#endif
 			continue; // discard already been allowed
 
 		case SPageInfo::EPagedPinned:
 			__NK_ASSERT_DEBUG(0);
 		default:
@@ -975,14 +1247,12 @@
 		case SPageInfo::EUnpaged:
 			continue; // discard already been disallowed
 
 		case SPageInfo::EPagedYoung:
 		case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
 		case SPageInfo::EPagedOldestClean:
 		case SPageInfo::EPagedOldestDirty:
-#endif
 			changeType = ETrue;
 			break; // remove from live list
 
 		case SPageInfo::EPagedPinned:
 			__NK_ASSERT_DEBUG(0);
@@ -1044,10 +1314,11 @@
 	TheRomMemoryManager->Init3();
 	TheDataPagedMemoryManager->Init3();
 	TheCodePagedMemoryManager->Init3();
 	TInt r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
 	__NK_ASSERT_ALWAYS(r==KErrNone);
+	PageCleaningLock::Init();
 	}
 
 
 void DPager::Fault(TFault aFault)
 	{
@@ -1058,16 +1329,12 @@
 void DPager::BalanceAges()
 	{
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
 	TBool restrictPage = EFalse;
 	SPageInfo* pageInfo = NULL;
-#ifdef _USE_OLDEST_LISTS
 	TUint oldestCount = iOldestCleanCount + iOldestDirtyCount;
 	if((iOldCount + oldestCount) * iYoungOldRatio < iYoungCount)
-#else
-	if (iOldCount * iYoungOldRatio < iYoungCount)
-#endif
 		{
 		// Need more old pages so make one young page into an old page...
 		__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
 		__NK_ASSERT_DEBUG(iYoungCount);
 		SDblQueLink* link = iYoungList.Last()->Deque();
@@ -1082,36 +1349,37 @@
 		Event(EEventPageAged,pageInfo);
 		// Delay restricting the page until it is safe to release the MmuLock.
 		restrictPage = ETrue;
 		}
 
-#ifdef _USE_OLDEST_LISTS
 	// Check we have enough oldest pages.
-	if (oldestCount * iOldOldestRatio < iOldCount)
+	if (oldestCount < KMaxOldestPages &&
+		oldestCount * iOldOldestRatio < iOldCount)
 		{
 		__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
 		__NK_ASSERT_DEBUG(iOldCount);
 		SDblQueLink* link = iOldList.Last()->Deque();
 		--iOldCount;
 
 		SPageInfo* oldestPageInfo = SPageInfo::FromLink(link);
 		if (oldestPageInfo->IsDirty())
 			{
 			oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestDirty);
 			iOldestDirtyList.AddHead(link);
 			++iOldestDirtyCount;
+			PageCleaner::NotifyPagesToClean();
 			Event(EEventPageAgedDirty,oldestPageInfo);
 			}
 		else
 			{
 			oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
 			iOldestCleanList.AddHead(link);
 			++iOldestCleanCount;
 			Event(EEventPageAgedClean,oldestPageInfo);
 			}
 		}
-#endif
+
 	if (restrictPage)
 		{
 		// Make the recently aged old page inaccessible.  This is done last as it 
 		// will release the MmuLock and therefore the page counts may otherwise change.
 		RestrictPage(pageInfo,ERestrictPagesNoAccessForOldPage);
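
[Reviewer note, not part of the changeset: with the defaults at the top of
the file (KDefaultMinPages = 256, KDefaultYoungOldRatio = 3,
KDefaultOldOldestRatio = 3), the minimum live list divides as:

	old + oldest = 256 / (1 + 3) = 64 pages   (young = 192)
	oldest       =  64 / (1 + 3) = 16 pages   (old   =  48)

The new oldestCount < KMaxOldestPages guard only bites when the cache grows
well beyond the minimum: promotion from old to oldest stops once the two
oldest lists together hold 32 pages, which bounds the list walk in
SelectPagesToClean(), a function called with the MmuLock held.]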
@@ -1142,14 +1410,12 @@
 	TRACE2(("DP: %O Rejuvenate PT 0x%08x 0x%08x",TheCurrentThread,pi->PhysAddr(),aPt));
 	switch(pi->PagedState())
 		{
 	case SPageInfo::EPagedYoung:
 	case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
 	case SPageInfo::EPagedOldestClean:
 	case SPageInfo::EPagedOldestDirty:
-#endif
 		RemovePage(pi);
 		AddAsYoungestPage(pi);
 		BalanceAges();
 		break;
 
@@ -1165,10 +1431,11 @@
 		__NK_ASSERT_DEBUG(0);
 		break;
 		}
 	}
 
+
 TInt DPager::PteAndInfoFromLinAddr(	TInt aOsAsid, TLinAddr aAddress, DMemoryMappingBase* aMapping, 
 									TUint aMapInstanceCount, TPte*& aPte, SPageInfo*& aPageInfo)
 	{
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());	
 
@@ -1190,15 +1457,17 @@
 	aPageInfo = pi;
 
 	return KErrNone;
 	}
 
+
 TInt DPager::TryRejuvenate(	TInt aOsAsid, TLinAddr aAddress, TUint aAccessPermissions, TLinAddr aPc,
 							DMemoryMappingBase* aMapping, TUint aMapInstanceCount, DThread* aThread, 
 							TAny* aExceptionInfo)
 	{
 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	START_PAGING_BENCHMARK;
 
 	SPageInfo* pi;
 	TPte* pPte;
 	TPte pte;
 	TInt r = PteAndInfoFromLinAddr(aOsAsid, aAddress, aMapping, aMapInstanceCount, pPte, pi);
@@ -1290,16 +1559,12 @@
 	InvalidateTLBForPage((aAddress&~KPageMask)|aOsAsid);
 
 	Event(EEventPageRejuvenate,pi,aPc,aAddress,aAccessPermissions);
 
 	TBool balance = false;
-#ifdef _USE_OLDEST_LISTS
 	if(	state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld || 
 		state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
-#else
-	if(state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld)
-#endif
 		{
 		RemovePage(pi);
 		AddAsYoungestPage(pi);
 		// delay BalanceAges because we don't want to release MmuLock until after
 		// RejuvenatePageTable has chance to look at the page table page...
@@ -1316,10 +1581,11 @@
 	RejuvenatePageTable(pPte);
 
 	if(balance)
 		BalanceAges();
 
+	END_PAGING_BENCHMARK(EPagingBmRejuvenate);
 	return KErrNone;
 	}
 
 
 TInt DPager::PageInAllocPages(TPhysAddr* aPages, TUint aCount, Mmu::TRamAllocFlags aAllocFlags)
@@ -1347,14 +1613,12 @@
 		SPageInfo* pi = SPageInfo::FromPhysAddr(aPages[aCount]);
 		switch(pi->PagedState())
 			{
 		case SPageInfo::EPagedYoung:
 		case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
 		case SPageInfo::EPagedOldestClean:
 		case SPageInfo::EPagedOldestDirty:
-#endif
 			RemovePage(pi);
 			// fall through...
 		case SPageInfo::EUnpaged:
 			AddAsFreePage(pi);
 			break;
  1384 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1648 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1385 	switch(aPageInfo->PagedState())
  1649 	switch(aPageInfo->PagedState())
  1386 		{
  1650 		{
  1387 	case SPageInfo::EPagedYoung:
  1651 	case SPageInfo::EPagedYoung:
  1388 	case SPageInfo::EPagedOld:
  1652 	case SPageInfo::EPagedOld:
  1389 #ifdef _USE_OLDEST_LISTS
       
  1390 	case SPageInfo::EPagedOldestClean:
  1653 	case SPageInfo::EPagedOldestClean:
  1391 	case SPageInfo::EPagedOldestDirty:
  1654 	case SPageInfo::EPagedOldestDirty:
  1392 #endif
       
  1393 		RemovePage(aPageInfo);
  1655 		RemovePage(aPageInfo);
  1394 		AddAsYoungestPage(aPageInfo);
  1656 		AddAsYoungestPage(aPageInfo);
  1395 		BalanceAges();
  1657 		BalanceAges();
  1396 		break;
  1658 		break;
  1397 
  1659 
  1444 		aPageInfo->iLink.Deque();
  1706 		aPageInfo->iLink.Deque();
  1445 		--iOldCount;
  1707 		--iOldCount;
  1446 		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
  1708 		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
  1447 		break;
  1709 		break;
  1448 
  1710 
  1449 #ifdef _USE_OLDEST_LISTS
       
  1450 	case SPageInfo::EPagedOldestClean:
  1711 	case SPageInfo::EPagedOldestClean:
  1451 		__NK_ASSERT_DEBUG(iOldestCleanCount);
  1712 		__NK_ASSERT_DEBUG(iOldestCleanCount);
  1452 		aPageInfo->iLink.Deque();
  1713 		aPageInfo->iLink.Deque();
  1453 		--iOldestCleanCount;
  1714 		--iOldestCleanCount;
  1454 		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
  1715 		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
  1458 		__NK_ASSERT_DEBUG(iOldestDirtyCount);
  1719 		__NK_ASSERT_DEBUG(iOldestDirtyCount);
  1459 		aPageInfo->iLink.Deque();
  1720 		aPageInfo->iLink.Deque();
  1460 		--iOldestDirtyCount;
  1721 		--iOldestDirtyCount;
  1461 		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
  1722 		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
  1462 		break;
  1723 		break;
  1463 #endif
       
  1464 
  1724 
  1465 	case SPageInfo::EPagedPinned:
  1725 	case SPageInfo::EPagedPinned:
  1466 		// nothing more to do...
  1726 		// nothing more to do...
  1467 		__NK_ASSERT_DEBUG(aPageInfo->PinCount()>1);
  1727 		__NK_ASSERT_DEBUG(aPageInfo->PinCount()>1);
  1468 		return;
  1728 		return;
  1747 	NKern::ThreadEnterCS();
  2007 	NKern::ThreadEnterCS();
  1748 	RamAllocLock::Lock();
  2008 	RamAllocLock::Lock();
  1749 
  2009 
  1750 	MmuLock::Lock();
  2010 	MmuLock::Lock();
  1751 
  2011 
  1752 	__NK_ASSERT_ALWAYS(iYoungOldRatio!=0);
  2012 	__NK_ASSERT_ALWAYS(iYoungOldRatio);
  1753 
  2013 
  1754 	// Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
  2014 	// Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
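        	// (iMinYoungPages*(1+R)/R, with R==iYoungOldRatio, is roughly the smallest
        	// live list size whose young part can still hold iMinYoungPages pages at
        	// that ratio, to within integer rounding.)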
  1755 	iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
  2015 	iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
  1756 						+ DPageReadRequest::ReservedPagesRequired();
  2016 						+ DPageReadRequest::ReservedPagesRequired();
  1757 	if(iMinimumPageLimit<iAbsoluteMinPageCount)
  2017 	if(iMinimumPageLimit<iAbsoluteMinPageCount)
  1828 
  2088 
  1829 	return r;
  2089 	return r;
  1830 	}
  2090 	}
  1831 
  2091 
  1832 
  2092 
       
  2093 // WARNING THIS METHOD MAY HOLD THE RAM ALLOC LOCK FOR EXCESSIVE PERIODS.  DON'T USE THIS IN ANY PRODUCTION CODE.
  1833 void DPager::FlushAll()
  2094 void DPager::FlushAll()
  1834 	{
  2095 	{
  1835 	NKern::ThreadEnterCS();
  2096 	NKern::ThreadEnterCS();
  1836 	RamAllocLock::Lock();
  2097 	RamAllocLock::Lock();
       
  2098 	PageCleaningLock::Lock();
  1837 
  2099 
  1838 	TRACE(("DPager::FlushAll() live list young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  2100 	TRACE(("DPager::FlushAll() live list young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  1839 
  2101 
  1840 	// look at all RAM pages in the system, and unmap all those used for paging
  2102 	// look at all RAM pages in the system, and unmap all those used for paging
  1841 	const TUint32* piMap = (TUint32*)KPageInfoMap;
  2103 	const TUint32* piMap = (TUint32*)KPageInfoMap;
  1854 				}
  2116 				}
  1855 			SPageInfo* piEnd = pi+KPageInfosPerPage;
  2117 			SPageInfo* piEnd = pi+KPageInfosPerPage;
  1856 			do
  2118 			do
  1857 				{
  2119 				{
  1858 				SPageInfo::TPagedState state = pi->PagedState();
  2120 				SPageInfo::TPagedState state = pi->PagedState();
  1859 #ifdef _USE_OLDEST_LISTS
       
  1860 				if (state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
  2121 				if (state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
  1861 					state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
  2122 					state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
  1862 #else
       
  1863 				if(state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld)
       
  1864 #endif
       
  1865 					{
  2123 					{
  1866 					if (pi->Type() != SPageInfo::EUnused)
  2124 					if (pi->Type() != SPageInfo::EUnused)
  1867 						{
  2125 						{
  1868 						TInt r = StealPage(pi);
  2126 						TInt r = StealPage(pi);
  1869 						if(r==KErrNone)
  2127 						if(r==KErrNone)
  1872 						}
  2130 						}
  1873 					}
  2131 					}
  1874 				++pi;
  2132 				++pi;
  1875 				if(((TUint)pi&(0xf<<KPageInfoShift))==0)
  2133 				if(((TUint)pi&(0xf<<KPageInfoShift))==0)
  1876 					{
  2134 					{
  1877 					MmuLock::Unlock(); // every 16 page infos
  2135 					MmuLock::Flash(); // every 16 page infos
  1878 					RamAllocLock::Unlock();
       
  1879 					RamAllocLock::Lock();
       
  1880 					MmuLock::Lock();
       
  1881 					}
  2136 					}
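        				// (MmuLock::Flash() above briefly releases and re-acquires the lock,
        				// bounding how long it is held while walking the page info array)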
  1882 				}
  2137 				}
  1883 			while(pi<piEnd);
  2138 			while(pi<piEnd);
  1884 			}
  2139 			}
  1885 		pi = piNext;
  2140 		pi = piNext;
  1890 	// reduce live page list to a minimum
  2145 	// reduce live page list to a minimum
   1891 	while(GetFreePages(1)) {}
   2146 	while(GetFreePages(1)) {}
  1892 
  2147 
  1893 	TRACE(("DPager::FlushAll() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  2148 	TRACE(("DPager::FlushAll() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  1894 
  2149 
       
  2150 	PageCleaningLock::Unlock();
  1895 	RamAllocLock::Unlock();
  2151 	RamAllocLock::Unlock();
  1896 	NKern::ThreadLeaveCS();
  2152 	NKern::ThreadLeaveCS();
  1897 	}
  2153 	}
  1898 
  2154 
  1899 
  2155 
  2064 	case EVMHalGetPagingBenchmark:
  2320 	case EVMHalGetPagingBenchmark:
  2065 		{
  2321 		{
   2066 		TUint index = (TUint) a1;
   2322 		TUint index = (TUint) a1;
  2067 		if (index >= EMaxPagingBm)
  2323 		if (index >= EMaxPagingBm)
  2068 			return KErrNotFound;
  2324 			return KErrNotFound;
  2069 		NKern::LockSystem();
  2325 		SPagingBenchmarkInfo info;
  2070 		SPagingBenchmarkInfo info = ThePager.iBenchmarkInfo[index];
  2326 		ThePager.ReadBenchmarkData((TPagingBenchmark)index, info);
  2071 		NKern::UnlockSystem();
       
  2072 		kumemput32(a2,&info,sizeof(info));
  2327 		kumemput32(a2,&info,sizeof(info));
  2073 		}		
  2328 		}		
  2074 		return KErrNone;
  2329 		return KErrNone;
  2075 		
  2330 		
  2076 	case EVMHalResetPagingBenchmark:
  2331 	case EVMHalResetPagingBenchmark:
  2077 		{
  2332 		{
   2078 		TUint index = (TUint) a1;
   2333 		TUint index = (TUint) a1;
  2079 		if (index >= EMaxPagingBm)
  2334 		if (index >= EMaxPagingBm)
  2080 			return KErrNotFound;
  2335 			return KErrNotFound;
  2081 		NKern::LockSystem();
       
  2082 		ThePager.ResetBenchmarkData((TPagingBenchmark)index);
  2336 		ThePager.ResetBenchmarkData((TPagingBenchmark)index);
  2083 		NKern::UnlockSystem();
       
  2084 		}
  2337 		}
  2085 		return KErrNone;
  2338 		return KErrNone;
  2086 #endif
  2339 #endif
  2087 
  2340 
  2088 	default:
  2341 	default:
  2094 #ifdef __DEMAND_PAGING_BENCHMARKS__
  2347 #ifdef __DEMAND_PAGING_BENCHMARKS__
  2095 
  2348 
  2096 void DPager::ResetBenchmarkData(TPagingBenchmark aBm)
  2349 void DPager::ResetBenchmarkData(TPagingBenchmark aBm)
  2097     {
  2350     {
  2098     SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
  2351     SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
       
  2352 	__SPIN_LOCK_IRQ(iBenchmarkLock);
  2099     info.iCount = 0;
  2353     info.iCount = 0;
  2100     info.iTotalTime = 0;
  2354     info.iTotalTime = 0;
  2101     info.iMaxTime = 0;
  2355     info.iMaxTime = 0;
  2102     info.iMinTime = KMaxTInt;
  2356     info.iMinTime = KMaxTInt;
       
  2357 	__SPIN_UNLOCK_IRQ(iBenchmarkLock);
  2103     }
  2358     }
  2104  
  2359  
  2105 void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime)
  2360 void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime, TUint aCount)
  2106     {
  2361     {
  2107     SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
  2362     SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
  2108     ++info.iCount;
       
  2109 #if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP)
  2363 #if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP)
  2110     TInt64 elapsed = aEndTime - aStartTime;
  2364     TInt64 elapsed = aEndTime - aStartTime;
  2111 #else
  2365 #else
  2112     TInt64 elapsed = aStartTime - aEndTime;
  2366     TInt64 elapsed = aStartTime - aEndTime;
  2113 #endif
  2367 #endif
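            // (with a down-counting high-res timer the subtraction above is reversed
            // so that 'elapsed' is still a positive tick count)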
       
  2368 	__SPIN_LOCK_IRQ(iBenchmarkLock);
       
   2369     info.iCount += aCount;
  2114     info.iTotalTime += elapsed;
  2370     info.iTotalTime += elapsed;
  2115     if (elapsed > info.iMaxTime)
  2371     if (elapsed > info.iMaxTime)
  2116         info.iMaxTime = elapsed;
  2372         info.iMaxTime = elapsed;
  2117     if (elapsed < info.iMinTime)
  2373     if (elapsed < info.iMinTime)
  2118         info.iMinTime = elapsed;
  2374         info.iMinTime = elapsed;
       
  2375 	__SPIN_UNLOCK_IRQ(iBenchmarkLock);
  2119     }
  2376     }
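
        // A minimal usage sketch: callers bracket the operation being measured with
        // the paging benchmark macros, which capture high-res timestamps and pass
        // them to RecordBenchmarkData(), e.g.
        //
        //   START_PAGING_BENCHMARK;
        //   // ... do the work, e.g. rejuvenate a page ...
        //   END_PAGING_BENCHMARK(EPagingBmRejuvenate);
        //
        // as DPager::TryRejuvenate() above does.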
       
  2377 
       
  2378 void DPager::ReadBenchmarkData(TPagingBenchmark aBm, SPagingBenchmarkInfo& aDataOut)
       
  2379 	{
       
  2380 	__SPIN_LOCK_IRQ(iBenchmarkLock);
       
  2381 	aDataOut = iBenchmarkInfo[aBm];
       
  2382 	__SPIN_UNLOCK_IRQ(iBenchmarkLock);
       
  2383 	}
  2120 
  2384 
  2121 #endif //__DEMAND_PAGING_BENCHMARKS__
  2385 #endif //__DEMAND_PAGING_BENCHMARKS__
  2122 
  2386 
  2123 
  2387 
  2124 //
  2388 //
  2127 
  2391 
  2128 //
  2392 //
  2129 // DPagingRequest
  2393 // DPagingRequest
  2130 //
  2394 //
  2131 
  2395 
  2132 DPagingRequest::DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup)
  2396 DPagingRequest::DPagingRequest()
  2133 	: iPoolGroup(aPoolGroup), iUseRegionMemory(0), iUseRegionIndex(0), iUseRegionCount(0)
  2397 	: iMutex(NULL), iUseRegionCount(0)
  2134 	{
  2398 	{
  2135 	}
  2399 	}
  2136 
  2400 
  2137 
  2401 
  2138 FORCE_INLINE void DPagingRequest::SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2402 void DPagingRequest::SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2139 	{
  2403 	{
  2140 	__ASSERT_SYSTEM_LOCK;
  2404 	__ASSERT_SYSTEM_LOCK;
  2141 	iUseRegionMemory = aMemory;
  2405 	__NK_ASSERT_DEBUG(iUseRegionCount == 0);
  2142 	iUseRegionIndex = aIndex;
  2406 	__NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages);
       
  2407 	for (TUint i = 0 ; i < aCount ; ++i)
       
  2408 		{
       
  2409 		iUseRegionMemory[i] = aMemory;
       
  2410 		iUseRegionIndex[i] = aIndex + i;		
       
  2411 		}
  2143 	iUseRegionCount = aCount;
  2412 	iUseRegionCount = aCount;
  2144 	}
  2413 	}
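
        // Note: recording a contiguous region as per-page (memory, index) pairs
        // costs a little space, but it lets the contiguous and discontiguous cases
        // share the ResetUse/CheckUse/IsCollision logic below.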
  2145 
  2414 
  2146 
  2415 
  2147 TBool DPagingRequest::CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2416 void DPagingRequest::SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
  2148 	{
  2417 	{
  2149 	return aMemory==iUseRegionMemory
  2418 	__ASSERT_SYSTEM_LOCK;
  2150 		&& TUint(aIndex-iUseRegionIndex) < iUseRegionCount
  2419 	__NK_ASSERT_DEBUG(iUseRegionCount == 0);
  2151 		&& TUint(iUseRegionCount-TUint(aIndex-iUseRegionIndex)) <= aCount;
  2420 	__NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages);
  2152 	}
  2421 	for (TUint i = 0 ; i < aCount ; ++i)
  2153 
  2422 		{
  2154 
  2423 		iUseRegionMemory[i] = aMemory[i];
  2155 void DPagingRequest::Release()
  2424 		iUseRegionIndex[i] = aIndex[i];
       
  2425 		}
       
  2426 	iUseRegionCount = aCount;
       
  2427 	}
       
  2428 
       
  2429 
       
  2430 void DPagingRequest::ResetUse()
       
  2431 	{
       
  2432 	__ASSERT_SYSTEM_LOCK;
       
  2433 	__NK_ASSERT_DEBUG(iUseRegionCount > 0);
       
  2434 	iUseRegionCount = 0;
       
  2435 	}
       
  2436 
       
  2437 
       
  2438 TBool DPagingRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
       
  2439 	{
       
  2440 	if (iUseRegionCount != aCount)
       
  2441 		return EFalse;
       
  2442 	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
       
  2443 		{
       
  2444 		if (iUseRegionMemory[i] != aMemory || iUseRegionIndex[i] != aIndex + i)
       
  2445 			return EFalse;
       
  2446 		}
       
  2447 	return ETrue;
       
  2448 	}
       
  2449 
       
  2450 
       
  2451 TBool DPagingRequest::CheckUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
       
  2452 	{
       
  2453 	if (iUseRegionCount != aCount)
       
  2454 		return EFalse;
       
  2455 	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
       
  2456 		{
       
  2457 		if (iUseRegionMemory[i] != aMemory[i] || iUseRegionIndex[i] != aIndex[i])
       
  2458 			return EFalse;
       
  2459 		}
       
  2460 	return ETrue;
       
  2461 	}
       
  2462 
       
  2463 
       
   2464 TBool DPagingRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
       
  2465 	{
       
   2466 	// note this could be optimised as most of the time we will be checking read/read collisions,
       
   2467 	// and in that case both regions will be contiguous
       
  2468 	__ASSERT_SYSTEM_LOCK;
       
  2469 	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
       
  2470 		{
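        		// unsigned arithmetic wraps, so this single comparison checks
        		// aIndex <= iUseRegionIndex[i] && iUseRegionIndex[i] < aIndex+aCount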
       
  2471 		if (iUseRegionMemory[i] == aMemory &&
       
  2472 			TUint(iUseRegionIndex[i] - aIndex) < aCount)
       
  2473 			return ETrue;
       
  2474 		}
       
  2475 	return EFalse;
       
  2476 	}
       
  2477 
       
  2478 
       
  2479 TLinAddr DPagingRequest::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages)
       
  2480 	{
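        	// the calling thread must hold this request's mutex; a DMutex records
        	// its holder in iCleanup.iThread, which is what this assert checks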
       
  2481 	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
       
  2482 	return iTempMapping.Map(aPages,aCount,aColour);
       
  2483 	}
       
  2484 
       
  2485 
       
  2486 void DPagingRequest::UnmapPages(TBool aIMBRequired)
       
  2487 	{
       
  2488 	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
       
  2489 	iTempMapping.Unmap(aIMBRequired);
       
  2490 	}
       
  2491 
       
  2492 //
       
  2493 // DPoolPagingRequest
       
  2494 //
       
  2495 
       
  2496 DPoolPagingRequest::DPoolPagingRequest(DPagingRequestPool::TGroup& aPoolGroup) :
       
  2497 	iPoolGroup(aPoolGroup)
       
  2498 	{
       
  2499 	}
       
  2500 
       
  2501 
       
  2502 void DPoolPagingRequest::Release()
  2156 	{
  2503 	{
  2157 	NKern::LockSystem();
  2504 	NKern::LockSystem();
  2158 	SetUse(0,0,0);
  2505 	ResetUse();
  2159 	Signal();
  2506 	Signal();
  2160 	}
  2507 	}
  2161 
  2508 
  2162 
  2509 
  2163 void DPagingRequest::Wait()
  2510 void DPoolPagingRequest::Wait()
  2164 	{
  2511 	{
  2165 	__ASSERT_SYSTEM_LOCK;
  2512 	__ASSERT_SYSTEM_LOCK;
  2166 	++iUsageCount;
  2513 	++iUsageCount;
  2167 	TInt r = iMutex->Wait();
  2514 	TInt r = iMutex->Wait();
  2168 	__NK_ASSERT_ALWAYS(r == KErrNone);
  2515 	__NK_ASSERT_ALWAYS(r == KErrNone);
  2169 	}
  2516 	}
  2170 
  2517 
  2171 
  2518 
  2172 void DPagingRequest::Signal()
  2519 void DPoolPagingRequest::Signal()
  2173 	{
  2520 	{
  2174 	__ASSERT_SYSTEM_LOCK;
  2521 	__ASSERT_SYSTEM_LOCK;
  2175 	iPoolGroup.Signal(this);
  2522 	iPoolGroup.Signal(this);
  2176 	}
  2523 	}
  2177 
       
  2178 
       
  2179 FORCE_INLINE TBool DPagingRequest::IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
       
  2180 	{
       
  2181 	__ASSERT_SYSTEM_LOCK;
       
  2182 	DMemoryObject* memory = iUseRegionMemory;
       
  2183 	TUint index = iUseRegionIndex;
       
  2184 	TUint count = iUseRegionCount;
       
  2185 	// note, this comparison would fail if either region includes page number KMaxTUint,
       
  2186 	// but it isn't possible to create a memory object which is > KMaxTUint pages...
       
  2187 	return (memory == aMemory) && ((index + count) > aIndex) && (index < (aIndex + aCount));
       
  2188 	}
       
  2189 
       
  2190 
       
  2191 TLinAddr DPagingRequest::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages)
       
  2192 	{
       
  2193 	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
       
  2194 	return iTempMapping.Map(aPages,aCount,aColour);
       
  2195 	}
       
  2196 
       
  2197 
       
  2198 void DPagingRequest::UnmapPages(TBool aIMBRequired)
       
  2199 	{
       
  2200 	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
       
  2201 	iTempMapping.Unmap(aIMBRequired);
       
  2202 	}
       
  2203 
       
  2204 
  2524 
  2205 //
  2525 //
  2206 // DPageReadRequest
  2526 // DPageReadRequest
  2207 //
  2527 //
  2208 
  2528 
  2209 TInt DPageReadRequest::iAllocNext = 0;
  2529 TInt DPageReadRequest::iAllocNext = 0;
       
  2530 
       
  2531 DPageReadRequest::DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup) :
       
  2532 	DPoolPagingRequest(aPoolGroup)
       
  2533 	{
       
  2534 	// allocate space for mapping pages whilst they're being loaded...
       
  2535 	iTempMapping.Alloc(EMaxPages);
       
  2536 	}
  2210 
  2537 
  2211 TInt DPageReadRequest::Construct()
  2538 TInt DPageReadRequest::Construct()
  2212 	{
  2539 	{
  2213 	// allocate id and mutex...
  2540 	// allocate id and mutex...
  2214 	TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
  2541 	TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
  2216 	TBuf<sizeof("PageReadRequest-")+10> mutexName(KLitPagingRequest);
  2543 	TBuf<sizeof("PageReadRequest-")+10> mutexName(KLitPagingRequest);
  2217 	mutexName.AppendNum(id);
  2544 	mutexName.AppendNum(id);
  2218 	TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageIn);
  2545 	TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageIn);
  2219 	if(r!=KErrNone)
  2546 	if(r!=KErrNone)
  2220 		return r;
  2547 		return r;
  2221 
       
  2222 	// allocate space for mapping pages whilst they're being loaded...
       
  2223 	iTempMapping.Alloc(EMaxPages);
       
  2224 
  2548 
  2225 	// create memory buffer...
  2549 	// create memory buffer...
  2226 	TUint bufferSize = EMaxPages+1;
  2550 	TUint bufferSize = EMaxPages+1;
  2227 	DMemoryObject* bufferMemory;
  2551 	DMemoryObject* bufferMemory;
  2228 	r = MM::MemoryNew(bufferMemory,EMemoryObjectUnpaged,bufferSize,EMemoryCreateNoWipe);
  2552 	r = MM::MemoryNew(bufferMemory,EMemoryObjectUnpaged,bufferSize,EMemoryCreateNoWipe);
  2246 
  2570 
  2247 //
  2571 //
  2248 // DPageWriteRequest
  2572 // DPageWriteRequest
  2249 //
  2573 //
  2250 
  2574 
  2251 TInt DPageWriteRequest::iAllocNext = 0;
  2575 
  2252 
  2576 DPageWriteRequest::DPageWriteRequest()
  2253 TInt DPageWriteRequest::Construct()
  2577 	{
  2254 	{
  2578 	iMutex = ThePageCleaningLock;
  2255 	// allocate id and mutex...
       
  2256 	TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
       
  2257 	_LIT(KLitPagingRequest,"PageWriteRequest-");
       
  2258 	TBuf<sizeof("PageWriteRequest-")+10> mutexName(KLitPagingRequest);
       
  2259 	mutexName.AppendNum(id);
       
  2260 	TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageOut);
       
  2261 	if(r!=KErrNone)
       
  2262 		return r;
       
  2263 
       
   2264 	// allocate space for mapping pages whilst they're being written...
   2579 	// allocate space for mapping pages whilst they're being written...
  2265 	iTempMapping.Alloc(EMaxPages);
  2580 	iTempMapping.Alloc(KMaxPagesToClean);
  2266 
  2581 	}
  2267 	return r;
  2582 
       
  2583 
       
  2584 void DPageWriteRequest::Release()
       
  2585 	{
       
  2586 	NKern::LockSystem();
       
  2587 	ResetUse();
       
  2588 	NKern::UnlockSystem();
  2268 	}
  2589 	}
  2269 
  2590 
  2270 
  2591 
  2271 //
  2592 //
  2272 // DPagingRequestPool
  2593 // DPagingRequestPool
  2273 //
  2594 //
  2274 
  2595 
  2275 DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest)
  2596 DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest, TBool aWriteRequest)
  2276 	: iPageReadRequests(aNumPageReadRequest), iPageWriteRequests(aNumPageWriteRequest)
  2597 	: iPageReadRequests(aNumPageReadRequest)
  2277 	{
  2598 	{
  2278 	TUint i;
  2599 	TUint i;
  2279 
       
  2280 	for(i=0; i<aNumPageReadRequest; ++i)
  2600 	for(i=0; i<aNumPageReadRequest; ++i)
  2281 		{
  2601 		{
  2282 		DPageReadRequest* req = new DPageReadRequest(iPageReadRequests);
  2602 		DPageReadRequest* req = new DPageReadRequest(iPageReadRequests);
  2283 		__NK_ASSERT_ALWAYS(req);
  2603 		__NK_ASSERT_ALWAYS(req);
  2284 		TInt r = req->Construct();
  2604 		TInt r = req->Construct();
  2285 		__NK_ASSERT_ALWAYS(r==KErrNone);
  2605 		__NK_ASSERT_ALWAYS(r==KErrNone);
  2286 		iPageReadRequests.iRequests[i] = req;
  2606 		iPageReadRequests.iRequests[i] = req;
  2287 		iPageReadRequests.iFreeList.Add(req);
  2607 		iPageReadRequests.iFreeList.Add(req);
  2288 		}
  2608 		}
  2289 
  2609 
  2290 	for(i=0; i<aNumPageWriteRequest; ++i)
  2610 	if (aWriteRequest)
  2291 		{
  2611 		{
  2292 		DPageWriteRequest* req = new DPageWriteRequest(iPageWriteRequests);
  2612 		iPageWriteRequest = new DPageWriteRequest();
  2293 		__NK_ASSERT_ALWAYS(req);
  2613 		__NK_ASSERT_ALWAYS(iPageWriteRequest);
  2294 		TInt r = req->Construct();
       
  2295 		__NK_ASSERT_ALWAYS(r==KErrNone);
       
  2296 		iPageWriteRequests.iRequests[i] = req;
       
  2297 		iPageWriteRequests.iFreeList.Add(req);
       
  2298 		}
  2614 		}
  2299 	}
  2615 	}
  2300 
  2616 
  2301 
  2617 
  2302 DPagingRequestPool::~DPagingRequestPool()
  2618 DPagingRequestPool::~DPagingRequestPool()
  2307 
  2623 
  2308 DPageReadRequest* DPagingRequestPool::AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2624 DPageReadRequest* DPagingRequestPool::AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2309 	{
  2625 	{
  2310 	NKern::LockSystem();
  2626 	NKern::LockSystem();
  2311 
  2627 
  2312 	DPagingRequest* req;
  2628 	DPoolPagingRequest* req;
  2313 
  2629 	
  2314 	// if we collide with page write operation...
  2630 	// check for collision with existing write
  2315 	req = iPageWriteRequests.FindCollision(aMemory,aIndex,aCount);
  2631 	if(iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount))
  2316 	if(req)
  2632 		{
  2317 		{
  2633 		NKern::UnlockSystem();
  2318 		// wait until write completes...
  2634 		PageCleaningLock::Lock();
  2319 		req->Wait();
  2635 		PageCleaningLock::Unlock();
  2320 		req->Signal();
       
  2321 		return 0; // caller expected to retry if needed
  2636 		return 0; // caller expected to retry if needed
  2322 		}
  2637 		}
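        		// (locking and immediately unlocking PageCleaningLock above simply
        		// waits for the in-progress write to finish before the caller retries)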
  2323 
  2638 
  2324 	// get a request object to use...
  2639 	// get a request object to use...
  2325 	req = iPageReadRequests.GetRequest(aMemory,aIndex,aCount);
  2640 	req = iPageReadRequests.GetRequest(aMemory,aIndex,aCount);
  2326 
  2641 
  2327 	// check no new requests collide with us...
  2642 	// check no new read or write requests collide with us...
  2328 	if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount)
  2643 	if ((iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount)) ||
  2329 		|| iPageReadRequests.FindCollision(aMemory,aIndex,aCount))
  2644 		iPageReadRequests.FindCollisionContiguous(aMemory,aIndex,aCount))
  2330 		{
  2645 		{
  2331 		// another operation is colliding with this region, give up and retry...
  2646 		// another operation is colliding with this region, give up and retry...
  2332 		req->Signal();
  2647 		req->Signal();
  2333 		return 0; // caller expected to retry if needed
  2648 		return 0; // caller expected to retry if needed
  2334 		}
  2649 		}
  2335 
  2650 
  2336 	// we have a request object which we can use...
  2651 	// we have a request object which we can use...
  2337 	req->SetUse(aMemory,aIndex,aCount);
  2652 	req->SetUseContiguous(aMemory,aIndex,aCount);
  2338 
  2653 
  2339 	NKern::UnlockSystem();
  2654 	NKern::UnlockSystem();
  2340 	return (DPageReadRequest*)req;
  2655 	return (DPageReadRequest*)req;
  2341 	}
  2656 	}
  2342 
  2657 
  2343 
  2658 
  2344 DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2659 DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
  2345 	{
  2660 	{
       
  2661 	__NK_ASSERT_DEBUG(iPageWriteRequest);
       
  2662 	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
       
  2663 
  2346 	NKern::LockSystem();
  2664 	NKern::LockSystem();
  2347 
  2665 
  2348 	DPagingRequest* req;
  2666 	// Collision with existing read requests is not possible here.  For a page to be read it must
  2349 
  2667 	// not be present, and for it to be written it must be present and dirty.  There is no way for a
  2350 	for(;;)
  2668 	// page to go between these states without an intervening read on an uninitialised (freshly
  2351 		{
  2669 	// committed) page, which will wait on the first read request.  In other words something like
  2352 		// get a request object to use...
  2670 	// this:
  2353 		req = iPageWriteRequests.GetRequest(aMemory,aIndex,aCount);
  2671 	//
  2354 
  2672 	//   read (blocks), decommit, re-commit, read (waits on mutex), write (now no pending reads!)
  2355 		if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount))
  2673 	//
  2356 			{
  2674 	// Note that a read request can be outstanding and appear to collide with this write, but only
  2357 			// another write operation is colliding with this region, give up and retry...
  2675 	// in the case when the thread making the read has blocked just after acquiring the request but
  2358 			req->Signal();
   2676 	// before it checks whether the read is still necessary.  This makes it difficult to assert
  2359 			// Reacquire the system lock as Signal() above will release it.
  2677 	// that no collisions take place.
  2360 			NKern::LockSystem();
  2678 	
  2361 			continue;
  2679 	iPageWriteRequest->SetUseDiscontiguous(aMemory,aIndex,aCount);
  2362 			}
       
  2363 
       
  2364 		break;
       
  2365 		}
       
  2366 
       
  2367 	// we have a request object which we can use...
       
  2368 	req->SetUse(aMemory,aIndex,aCount);
       
  2369 
       
  2370 	NKern::UnlockSystem();
  2680 	NKern::UnlockSystem();
  2371 	return (DPageWriteRequest*)req;
  2681 	
       
  2682 	return iPageWriteRequest;
  2372 	}
  2683 	}
  2373 
  2684 
  2374 
  2685 
  2375 DPagingRequestPool::TGroup::TGroup(TUint aNumRequests)
  2686 DPagingRequestPool::TGroup::TGroup(TUint aNumRequests)
  2376 	{
  2687 	{
  2377 	iNumRequests = aNumRequests;
  2688 	iNumRequests = aNumRequests;
  2378 	iRequests = new DPagingRequest*[aNumRequests];
  2689 	iRequests = new DPoolPagingRequest*[aNumRequests];
  2379 	__NK_ASSERT_ALWAYS(iRequests);
  2690 	__NK_ASSERT_ALWAYS(iRequests);
  2380 	}
  2691 	}
  2381 
  2692 
  2382 
  2693 
  2383 DPagingRequest* DPagingRequestPool::TGroup::FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2694 DPoolPagingRequest* DPagingRequestPool::TGroup::FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2384 	{
  2695 	{
  2385 	__ASSERT_SYSTEM_LOCK;
  2696 	__ASSERT_SYSTEM_LOCK;
  2386 	DPagingRequest** ptr = iRequests;
  2697 	DPoolPagingRequest** ptr = iRequests;
  2387 	DPagingRequest** ptrEnd = ptr+iNumRequests;
  2698 	DPoolPagingRequest** ptrEnd = ptr+iNumRequests;
  2388 	while(ptr<ptrEnd)
  2699 	while(ptr<ptrEnd)
  2389 		{
  2700 		{
  2390 		DPagingRequest* req = *ptr++;
  2701 		DPoolPagingRequest* req = *ptr++;
  2391 		if(req->IsCollision(aMemory,aIndex,aCount))
  2702 		if(req->IsCollisionContiguous(aMemory,aIndex,aCount))
  2392 			return req;
  2703 			return req;
  2393 		}
  2704 		}
  2394 	return 0;
  2705 	return 0;
  2395 	}
  2706 	}
  2396 
  2707 
  2397 
  2708 
  2398 static TUint32 RandomSeed = 33333;
  2709 static TUint32 RandomSeed = 33333;
  2399 
  2710 
  2400 DPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2711 DPoolPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2401 	{
  2712 	{
  2402 	__NK_ASSERT_DEBUG(iNumRequests > 0);
  2713 	__NK_ASSERT_DEBUG(iNumRequests > 0);
  2403 
  2714 
  2404 	// try using an existing request which collides with this region...
  2715 	// try using an existing request which collides with this region...
  2405 	DPagingRequest* req  = FindCollision(aMemory,aIndex,aCount);
  2716 	DPoolPagingRequest* req  = FindCollisionContiguous(aMemory,aIndex,aCount);
  2406 	if(!req)
  2717 	if(!req)
  2407 		{
  2718 		{
  2408 		// use a free request...
  2719 		// use a free request...
  2409 		req = (DPagingRequest*)iFreeList.GetFirst();
  2720 		req = (DPoolPagingRequest*)iFreeList.GetFirst();
  2410 		if(req)
  2721 		if(req)
  2411 			{
  2722 			{
  2412 			// free requests aren't being used...
  2723 			// free requests aren't being used...
  2413 			__NK_ASSERT_DEBUG(req->iUsageCount == 0);
  2724 			__NK_ASSERT_DEBUG(req->iUsageCount == 0);
  2414 			}
  2725 			}
  2427 
  2738 
  2428 	return req;
  2739 	return req;
  2429 	}
  2740 	}
  2430 
  2741 
  2431 
  2742 
  2432 void DPagingRequestPool::TGroup::Signal(DPagingRequest* aRequest)
  2743 void DPagingRequestPool::TGroup::Signal(DPoolPagingRequest* aRequest)
  2433 	{
  2744 	{
  2434 	// if there are no threads waiting on the mutex then return it to the free pool...
  2745 	// if there are no threads waiting on the mutex then return it to the free pool...
  2435 	__NK_ASSERT_DEBUG(aRequest->iUsageCount > 0);
  2746 	__NK_ASSERT_DEBUG(aRequest->iUsageCount > 0);
  2436 	if (--aRequest->iUsageCount==0)
  2747 	if (--aRequest->iUsageCount==0)
  2437 		iFreeList.AddHead(aRequest);
  2748 		iFreeList.AddHead(aRequest);
  2455 	__NK_ASSERT_ALWAYS(aDevice->iReadUnitShift <= KPageShift);
  2766 	__NK_ASSERT_ALWAYS(aDevice->iReadUnitShift <= KPageShift);
  2456 
  2767 
  2457 	TInt r = KErrNotSupported;	// Will return this if unsupported device type is installed
  2768 	TInt r = KErrNotSupported;	// Will return this if unsupported device type is installed
  2458 
  2769 
  2459 	// create the pools of page out and page in requests...
  2770 	// create the pools of page out and page in requests...
  2460 	const TInt writeReqs = (aDevice->iType & DPagingDevice::EData) ? KPagingRequestsPerDevice : 0;
  2771 	const TBool writeReq = (aDevice->iType & DPagingDevice::EData) != 0;
  2461 	aDevice->iRequestPool = new DPagingRequestPool(KPagingRequestsPerDevice,writeReqs);
  2772 	aDevice->iRequestPool = new DPagingRequestPool(KPagingRequestsPerDevice, writeReq);
  2462 	if(!aDevice->iRequestPool)
  2773 	if(!aDevice->iRequestPool)
  2463 		{
  2774 		{
  2464 		r = KErrNoMemory;
  2775 		r = KErrNoMemory;
  2465 		goto exit;
  2776 		goto exit;
  2466 		}
  2777 		}
  2486 			goto exit;
  2797 			goto exit;
  2487 		}
  2798 		}
  2488 
  2799 
  2489  	if (K::MemModelAttributes & (EMemModelAttrRomPaging | EMemModelAttrCodePaging | EMemModelAttrDataPaging))
  2800  	if (K::MemModelAttributes & (EMemModelAttrRomPaging | EMemModelAttrCodePaging | EMemModelAttrDataPaging))
  2490 		TheThrashMonitor.Start();
  2801 		TheThrashMonitor.Start();
       
  2802 	
       
  2803  	if (K::MemModelAttributes & EMemModelAttrDataPaging)
       
  2804 		PageCleaner::Start();
  2491 
  2805 
  2492 exit:
  2806 exit:
  2493 	TRACEB(("Kern::InstallPagingDevice returns %d",r));
  2807 	TRACEB(("Kern::InstallPagingDevice returns %d",r));
  2494 	return r;
  2808 	return r;
  2495 	}
  2809 	}
  2635 	iLockedPageCount = 0;
  2949 	iLockedPageCount = 0;
  2636 	NKern::ThreadLeaveCS();
  2950 	NKern::ThreadLeaveCS();
  2637 	}
  2951 	}
  2638 
  2952 
  2639 
  2953 
       
  2954 
       
  2955 //
       
  2956 // PageCleaningLock
       
  2957 //
       
  2958 
       
  2959 _LIT(KLitPageCleaningLock,"PageCleaningLock");
       
  2960 
       
  2961 void PageCleaningLock::Init()
       
  2962 	{
       
  2963 	__NK_ASSERT_DEBUG(!ThePageCleaningLock);
       
  2964 	TInt r = Kern::MutexCreate(ThePageCleaningLock, KLitPageCleaningLock, KMutexOrdPageOut);
       
  2965 	__NK_ASSERT_ALWAYS(r == KErrNone);
       
  2966 	}
       
  2967 
       
  2968 void PageCleaningLock::Lock()
       
  2969 	{
       
  2970 	Kern::MutexWait(*ThePageCleaningLock);
       
  2971 	}
       
  2972 
       
  2973 
       
  2974 void PageCleaningLock::Unlock()
       
  2975 	{
       
  2976 	Kern::MutexSignal(*ThePageCleaningLock);
       
  2977 	}
       
  2978 
       
  2979 TBool PageCleaningLock::IsHeld()
       
  2980 	{
       
  2981 	return ThePageCleaningLock->iCleanup.iThread == &Kern::CurrentThread();
       
  2982 	}
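
        // A minimal usage sketch (hypothetical caller): page cleaning code takes this
        // lock, from inside a critical section, around the whole clean operation:
        //
        //   NKern::ThreadEnterCS();
        //   PageCleaningLock::Lock();
        //   // ... write back dirty pages / steal pages ...
        //   PageCleaningLock::Unlock();
        //   NKern::ThreadLeaveCS();
        //
        // DPager::FlushAll() above follows this pattern, and DPageWriteRequest uses
        // ThePageCleaningLock as its request mutex to serialise page writes.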