kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp
changeset 176 af6ec97d9189
parent 132 e4a7b1cbe40c
child 245 647ab20fee2e
175:5af6c74cd793 176:af6ec97d9189
    41 /*	On a 32 bit system without PAE can't have more than 2^(32-KPageShift) pages.
    41 /*	On a 32 bit system without PAE can't have more than 2^(32-KPageShift) pages.
    42  *	Subtract 1 so it doesn't overflow when converted to bytes.
    42  *	Subtract 1 so it doesn't overflow when converted to bytes.
    43 */
    43 */
    44 const TUint	KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u;
    44 const TUint	KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u;
    45 
    45 
    46 /*
    46 /**
    47 Limit the maximum number of oldest pages to bound the time taken by SelectPagesToClean(), which is
    47 Default limit for the maximum number of oldest pages.
    48 called with the MmuLock held.
    48 
       
    49 If the data paging device sets iPreferredWriteShift, then this is increased if necessary to allow
       
    50 that many pages to be present.
       
    51 
       
    52 This limit exists to make our live list implementation a closer approximation to LRU, and to bound
       
    53 the time taken by SelectSequentialPagesToClean(), which is called with the MmuLock held.
    49 */
    54 */
    50 const TUint KMaxOldestPages = 32;
    55 const TUint KDefaultMaxOldestPages = 32;
    51 
    56 
    52 static DMutex* ThePageCleaningLock = NULL;
    57 static DMutex* ThePageCleaningLock = NULL;
    53 
    58 
    54 DPager ThePager;
    59 DPager ThePager;
    55 
    60 
    56 
    61 
    57 DPager::DPager()
    62 DPager::DPager() :
    58 	: iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0),
    63 	iMinimumPageCount(0),
    59 	  iYoungCount(0), iOldCount(0), iOldestCleanCount(0),
    64 	iMaximumPageCount(0),
    60 	  iNumberOfFreePages(0), iReservePageCount(0), iMinimumPageLimit(0)
    65 	iYoungOldRatio(0),
       
    66 	iYoungCount(0),
       
    67 	iOldCount(0),
       
    68 	iOldestCleanCount(0),
       
    69 	iMaxOldestPages(KDefaultMaxOldestPages),
       
    70 	iNumberOfFreePages(0),
       
    71 	iReservePageCount(0),
       
    72 	iMinimumPageLimit(0),
       
    73 	iPagesToClean(1)
    61 #ifdef __DEMAND_PAGING_BENCHMARKS__
    74 #ifdef __DEMAND_PAGING_BENCHMARKS__
    62 	, iBenchmarkLock(TSpinLock::EOrderGenericIrqHigh3)
    75 	, iBenchmarkLock(TSpinLock::EOrderGenericIrqHigh3)
    63 #endif	  
    76 #endif	  
    64 	{
    77 	{
    65 	}
    78 	}
   443 		{
   456 		{
   444 		case SPageInfo::EPagedYoung:
   457 		case SPageInfo::EPagedYoung:
   445 		case SPageInfo::EPagedOld:
   458 		case SPageInfo::EPagedOld:
   446 		case SPageInfo::EPagedOldestClean:
   459 		case SPageInfo::EPagedOldestClean:
   447 		case SPageInfo::EPagedOldestDirty:
   460 		case SPageInfo::EPagedOldestDirty:
   448 			{// Update the list links point to the new page.
   461 			{// Update the list links to point to the new page.
   449 			__NK_ASSERT_DEBUG(iYoungCount);
       
   450 			SDblQueLink* prevLink = aOldPageInfo.iLink.iPrev;
   462 			SDblQueLink* prevLink = aOldPageInfo.iLink.iPrev;
   451 #ifdef _DEBUG
   463 #ifdef _DEBUG
   452 			SDblQueLink* nextLink = aOldPageInfo.iLink.iNext;
   464 			SDblQueLink* nextLink = aOldPageInfo.iLink.iNext;
   453 			__NK_ASSERT_DEBUG(prevLink == aOldPageInfo.iLink.iPrev);
   465 			__NK_ASSERT_DEBUG(prevLink == aOldPageInfo.iLink.iPrev);
   454 			__NK_ASSERT_DEBUG(prevLink->iNext == &aOldPageInfo.iLink);
   466 			__NK_ASSERT_DEBUG(prevLink->iNext == &aOldPageInfo.iLink);
   483 			break;
   495 			break;
   484 		}	
   496 		}	
   485 	}
   497 	}
   486 
   498 
   487 
   499 
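The next hunk replaces TryStealOldestPage()/StealOldestPage() with a single StealOrAllocPage(). Its restart logic follows one rule: never block on the PageCleaningLock while the MmuLock is held, and revalidate everything after reacquiring. A minimal sketch of that drop-and-retry pattern, using std::mutex stand-ins for illustration only (not the kernel's actual lock API):

#include <mutex>

std::mutex mmuLock;           // stand-in for MmuLock (never held across blocking operations)
std::mutex pageCleaningLock;  // stand-in for PageCleaningLock (held across page-out I/O)

// Steal a page, taking the cleaning lock only once a dirty victim is found.
// The mmu lock is dropped before blocking, and because the lists may change
// while it is dropped, the whole search restarts afterwards.
void StealPageSketch()
	{
	bool cleaningLockHeld = false;
	for (;;)
		{
		std::unique_lock<std::mutex> mmu(mmuLock);
		bool victimIsDirty = true;       // placeholder for examining the live list
		if (victimIsDirty && !cleaningLockHeld)
			{
			mmu.unlock();                // never block on the cleaning lock while holding this
			pageCleaningLock.lock();     // may wait here while other pages are written out
			cleaningLockHeld = true;
			continue;                    // restart: state may have changed while unlocked
			}
		// ... steal the victim while mmu is held ...
		if (cleaningLockHeld)
			pageCleaningLock.unlock();
		return;
		}
	}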
   488 TInt DPager::TryStealOldestPage(SPageInfo*& aPageInfoOut)
   500 SPageInfo* DPager::StealOrAllocPage(TBool aAllowAlloc, Mmu::TRamAllocFlags aAllocFlags)
   489 	{
   501 	{
   490 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   502 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   491 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   503 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   492 
   504 
   493 	// find oldest page in list...
   505 	// The PageCleaningLock may or may not be held to start with
       
   506 	TBool pageCleaningLockAcquired = EFalse;
       
   507 
   494 	SDblQueLink* link;
   508 	SDblQueLink* link;
       
    509 	SPageInfo* pageInfo;
       
   510 	
       
   511 restart:
       
   512 
       
   513 	// if there is a free page in the live list then use that (it will be at the end)...
   495 	if (iOldestCleanCount)
   514 	if (iOldestCleanCount)
   496 		{
   515 		{
   497 		__NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty());
   516 		__NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty());
   498 		link = iOldestCleanList.Last();
   517 		link = iOldestCleanList.Last();
   499 		}
   518 		pageInfo = SPageInfo::FromLink(link);
   500 	else if (iOldestDirtyCount)
   519 		if(pageInfo->Type()==SPageInfo::EUnused)
       
   520 			goto try_steal_from_page_info;
       
   521 		}
       
   522 	
       
   523 	// maybe try getting a free page from the system pool...
       
   524 	if (aAllowAlloc && !HaveMaximumPages())
       
   525 		{
       
   526 		MmuLock::Unlock();
       
   527 		pageInfo = GetPageFromSystem(aAllocFlags);
       
   528 		MmuLock::Lock();
       
   529 		if (pageInfo)
       
   530 			goto exit;
       
   531 		}
       
   532 	
       
    533 	// try stealing the oldest clean page on the live list if there is one...
       
   534 	if (iOldestCleanCount)
       
   535 		{
       
   536 		__NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty());
       
   537 		link = iOldestCleanList.Last();
       
   538 		goto try_steal_from_link;
       
   539 		}
       
   540 
       
   541 	// no clean oldest pages, see if we can clean multiple dirty pages in one go...
       
   542 	if (iOldestDirtyCount > 1 && iPagesToClean > 1)
       
   543 		{
       
   544 		__NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
       
   545 
       
   546 		// check if we hold page cleaning lock
       
   547 		TBool needPageCleaningLock = !PageCleaningLock::IsHeld();
       
   548 		if (needPageCleaningLock)
       
   549 			{
       
   550 			// temporarily release ram alloc mutex and acquire page cleaning mutex
       
   551 			MmuLock::Unlock();
       
   552 			RamAllocLock::Unlock();
       
   553 			PageCleaningLock::Lock();
       
   554 			MmuLock::Lock();
       
   555 			}
       
   556 
       
    557 		// if we've waited on the page cleaning mutex there may be clean pages now; if so, don't
       
    558 		// bother cleaning, just restart
       
   559 		if (iOldestCleanCount == 0 && iOldestDirtyCount >= 1)
       
   560 			CleanSomePages(EFalse);
       
   561 
       
   562 		if (needPageCleaningLock)
       
   563 			{
       
    564 			// release page cleaning mutex and re-acquire ram alloc mutex
       
   565 			MmuLock::Unlock();
       
   566 			PageCleaningLock::Unlock();			
       
   567 			RamAllocLock::Lock();
       
   568 			MmuLock::Lock();
       
   569 			}
       
   570 
       
   571 		// if there are now some clean pages we restart so as to take one of them
       
   572 		if (iOldestCleanCount > 0)
       
   573 			goto restart;
       
   574 		}
       
   575 
       
   576 	// otherwise just try to steal the oldest page...
       
   577 	if (iOldestDirtyCount)
   501 		{
   578 		{
   502 		__NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
   579 		__NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
   503 		link = iOldestDirtyList.Last();
   580 		link = iOldestDirtyList.Last();
   504 		}
   581 		}
   505 	else if (iOldCount)
   582 	else if (iOldCount)
   511 		{
   588 		{
   512 		__NK_ASSERT_DEBUG(iYoungCount);
   589 		__NK_ASSERT_DEBUG(iYoungCount);
   513 		__NK_ASSERT_ALWAYS(!iYoungList.IsEmpty());
   590 		__NK_ASSERT_ALWAYS(!iYoungList.IsEmpty());
   514 		link = iYoungList.Last();
   591 		link = iYoungList.Last();
   515 		}
   592 		}
   516 	SPageInfo* pageInfo = SPageInfo::FromLink(link);
   593 
   517 
   594 try_steal_from_link:
       
   595 
       
   596 	// lookup page info
       
   597 	__NK_ASSERT_DEBUG(link);
       
   598 	pageInfo = SPageInfo::FromLink(link);
       
   599 	
       
   600 try_steal_from_page_info:
       
   601 	
       
   602 	// if the page is dirty and we don't hold the page cleaning mutex then we have to wait on it,
       
   603 	// and restart - we clean with the ram alloc mutex held in this case
   518 	if (pageInfo->IsDirty() && !PageCleaningLock::IsHeld())
   604 	if (pageInfo->IsDirty() && !PageCleaningLock::IsHeld())
   519 		return 1;
   605 		{		
   520 
   606 		MmuLock::Unlock();
       
   607 		PageCleaningLock::Lock();
       
   608 		MmuLock::Lock();
       
   609 		pageCleaningLockAcquired = ETrue;
       
   610 		goto restart;
       
   611 		}
       
   612 	
   521 	// try to steal it from owning object...
   613 	// try to steal it from owning object...
   522 	TInt r = StealPage(pageInfo);	
   614 	if (StealPage(pageInfo) != KErrNone)
   523 	if (r == KErrNone)
   615 		goto restart;
   524 		{
       
   525 		BalanceAges();
       
   526 		aPageInfoOut = pageInfo;
       
   527 		}
       
   528 	
   616 	
   529 	return r;
   617 	BalanceAges();
   530 	}
   618 	
   531 
   619 exit:
   532 
   620 	if (pageCleaningLockAcquired)
   533 SPageInfo* DPager::StealOldestPage()
   621 		{		
   534 	{
   622 		MmuLock::Unlock();
       
   623 		PageCleaningLock::Unlock();
       
   624 		MmuLock::Lock();
       
   625 		}
       
   626 
       
   627 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   535 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   628 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   536 	TBool pageCleaningLockHeld = EFalse;
   629 	
   537 	for(;;)
   630 	return pageInfo;
   538 		{
   631 	}
   539 		SPageInfo* pageInfo = NULL;
   632 
   540 		TInt r = TryStealOldestPage(pageInfo);
   633 
   541 		
   634 template <class T, TUint maxObjects> class TSequentialColourSelector
   542 		if (r == KErrNone)
       
   543 			{
       
   544 			if (pageCleaningLockHeld)
       
   545 				{
       
   546 				MmuLock::Unlock();
       
   547 				PageCleaningLock::Unlock();
       
   548 				MmuLock::Lock();
       
   549 				}
       
   550 			return pageInfo;
       
   551 			}
       
   552 		else if (r == 1)
       
   553 			{
       
   554 			__NK_ASSERT_ALWAYS(!pageCleaningLockHeld);
       
   555 			MmuLock::Unlock();
       
   556 			PageCleaningLock::Lock();
       
   557 			MmuLock::Lock();
       
   558 			pageCleaningLockHeld = ETrue;
       
   559 			}
       
   560 		// else retry...
       
   561 		}
       
   562 	}
       
   563 
       
   564 #ifdef __CPU_CACHE_HAS_COLOUR
       
   565 
       
   566 template <class T, TInt maxObjects> class TSequentialColourSelector
       
   567 	{
   635 	{
   568 public:
   636 public:
   569 	static const TInt KMaxLength = maxObjects;
   637 	enum
   570 	static const TInt KArrayLength = _ALIGN_UP(KMaxLength, KPageColourCount);
   638 		{
       
   639 		KMaxSearchLength = _ALIGN_UP(maxObjects, KPageColourCount)
       
   640 		};
   571 	
   641 	
   572 	FORCE_INLINE TSequentialColourSelector()
   642 	FORCE_INLINE TSequentialColourSelector(TUint aTargetLength)
   573 		{
   643 		{
   574 		memclr(this, sizeof(*this));
   644 		memclr(this, sizeof(*this));
       
   645 		__NK_ASSERT_DEBUG(aTargetLength <= maxObjects);
       
   646 		iTargetLength = aTargetLength;
       
   647 		iSearchLength = _ALIGN_UP(aTargetLength, KPageColourCount);
   575 		}
   648 		}
   576 
   649 
   577 	FORCE_INLINE TBool FoundLongestSequence()
   650 	FORCE_INLINE TBool FoundLongestSequence()
   578 		{
   651 		{
   579 		return iLongestLength >= KMaxLength;
   652 		return iLongestLength >= iTargetLength;
   580 		}
   653 		}
   581 
   654 
   582 	FORCE_INLINE void AddCandidate(T* aObject, TInt aColour)
   655 	FORCE_INLINE void AddCandidate(T* aObject, TUint aColour)
   583 		{
   656 		{
   584 		// allocate objects to slots based on colour
   657 		// allocate objects to slots based on colour
   585 		for (TInt i = aColour ; i < KArrayLength ; i += KPageColourCount)
   658 		for (TUint i = aColour ; i < iSearchLength ; i += KPageColourCount)
   586 			{
   659 			{
   587 			if (!iSlot[i])
   660 			if (!iSlot[i])
   588 				{
   661 				{
   589 				iSlot[i] = aObject;
   662 				iSlot[i] = aObject;
   590 				iSeqLength[i] = i == 0 ? 1 : iSeqLength[i - 1] + 1;
   663 				iSeqLength[i] = i == 0 ? 1 : iSeqLength[i - 1] + 1;
   591 				TInt j = i + 1;
   664 				TUint j = i + 1;
   592 				while(j < KArrayLength && iSeqLength[j])
   665 				while(j < iSearchLength && iSeqLength[j])
   593 					iSeqLength[j++] += iSeqLength[i];
   666 					iSeqLength[j++] += iSeqLength[i];
   594 				TInt currentLength = iSeqLength[j - 1];
   667 				TUint currentLength = iSeqLength[j - 1];
   595 				if (currentLength > iLongestLength)
   668 				if (currentLength > iLongestLength)
   596 					{
   669 					{
   597 					iLongestLength = currentLength;
   670 					iLongestLength = currentLength;
   598 					iLongestStart = j - currentLength;
   671 					iLongestStart = j - currentLength;
   599 					}
   672 					}
   600 				break;
   673 				break;
   601 				}
   674 				}
   602 			}
   675 			}
   603 		}
   676 		}
   604 
   677 
   605 	FORCE_INLINE TInt FindLongestRun(T** aObjectsOut)
   678 	FORCE_INLINE TUint FindLongestRun(T** aObjectsOut)
   606 		{
   679 		{
   607 		if (iLongestLength == 0)
   680 		if (iLongestLength == 0)
   608 			return 0;
   681 			return 0;
   609 
   682 
   610 		if (iLongestLength < KMaxLength && iSlot[0] && iSlot[KArrayLength - 1])
   683 		if (iLongestLength < iTargetLength && iSlot[0] && iSlot[iSearchLength - 1])
   611 			{
   684 			{
   612 			// check possibility of wrapping
   685 			// check possibility of wrapping
   613 
   686 
   614 			TInt i = 1;
   687 			TInt i = 1;
   615 			while (iSlot[i]) ++i;  // find first hole
   688 			while (iSlot[i]) ++i;  // find first hole
   616 			TInt wrappedLength = iSeqLength[KArrayLength - 1] + iSeqLength[i - 1];
   689 			TUint wrappedLength = iSeqLength[iSearchLength - 1] + iSeqLength[i - 1];
   617 			if (wrappedLength > iLongestLength)
   690 			if (wrappedLength > iLongestLength)
   618 				{
   691 				{
   619 				iLongestLength = wrappedLength;
   692 				iLongestLength = wrappedLength;
   620 				iLongestStart = KArrayLength - iSeqLength[KArrayLength - 1];
   693 				iLongestStart = iSearchLength - iSeqLength[iSearchLength - 1];
   621 				}
   694 				}
   622 			}		
   695 			}		
   623 
   696 
   624 		iLongestLength = Min(iLongestLength, KMaxLength);
   697 		iLongestLength = MinU(iLongestLength, iTargetLength);
   625 
   698 
   626 		__NK_ASSERT_DEBUG(iLongestStart >= 0 && iLongestStart < KArrayLength);
   699 		__NK_ASSERT_DEBUG(iLongestStart < iSearchLength);
   627 		__NK_ASSERT_DEBUG(iLongestStart + iLongestLength < 2 * KArrayLength);
   700 		__NK_ASSERT_DEBUG(iLongestStart + iLongestLength < 2 * iSearchLength);
   628 
   701 
   629 		TInt len = Min(iLongestLength, KArrayLength - iLongestStart);
   702 		TUint len = MinU(iLongestLength, iSearchLength - iLongestStart);
   630 		wordmove(aObjectsOut, &iSlot[iLongestStart], len * sizeof(T*));
   703 		wordmove(aObjectsOut, &iSlot[iLongestStart], len * sizeof(T*));
   631 		wordmove(aObjectsOut + len, &iSlot[0], (iLongestLength - len) * sizeof(T*));
   704 		wordmove(aObjectsOut + len, &iSlot[0], (iLongestLength - len) * sizeof(T*));
   632 		
   705 		
   633 		return iLongestLength;
   706 		return iLongestLength;
   634 		}
   707 		}
   635 
   708 
   636 private:
   709 private:
   637 	T* iSlot[KArrayLength];
   710 	TUint iTargetLength;
   638 	TInt8 iSeqLength[KArrayLength];
   711 	TUint iSearchLength;
   639 	TInt iLongestStart;
   712 	TUint iLongestStart;
   640 	TInt iLongestLength;
   713 	TUint iLongestLength;
       
   714 	T* iSlot[KMaxSearchLength];
       
   715 	TUint8 iSeqLength[KMaxSearchLength];
   641 	};
   716 	};
   642 
   717 
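A self-contained sketch of the slot-filling algorithm used by TSequentialColourSelector above, with plain ints in place of SPageInfo* and the colour count fixed at 4 (an invented value; real hardware fixes KPageColourCount). Each candidate lands in the first empty slot whose index matches its colour modulo the colour count, and run lengths accumulate exactly as in AddCandidate():

#include <cstdio>

const unsigned KColours = 4;   // assumed cache colour count
const unsigned KSlots = 8;     // search length, a multiple of KColours

int slot[KSlots];              // 0 = empty, else a page id
unsigned seqLen[KSlots];       // length of the filled run ending at each slot

void AddCandidate(int aPageId, unsigned aColour)
	{
	for (unsigned i = aColour; i < KSlots; i += KColours)
		{
		if (!slot[i])
			{
			slot[i] = aPageId;
			seqLen[i] = (i == 0) ? 1 : seqLen[i - 1] + 1;
			// extend any run that now joins up on the right
			for (unsigned j = i + 1; j < KSlots && seqLen[j]; ++j)
				seqLen[j] += seqLen[i];
			break;
			}
		}
	}

int main()
	{
	// pages with colours 2,3,0,1 fill slots 2,3,0,1: a contiguous run of 4
	AddCandidate(101, 2);
	AddCandidate(102, 3);
	AddCandidate(103, 0);
	AddCandidate(104, 1);
	unsigned best = 0;
	for (unsigned i = 0; i < KSlots; ++i)
		if (seqLen[i] > best)
			best = seqLen[i];
	printf("longest sequential-colour run: %u\n", best);   // prints 4
	return 0;
	}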
   643 TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut)
   718 
   644 	{
   719 TInt DPager::SelectSequentialPagesToClean(SPageInfo** aPageInfosOut)
   645 	// select up to KMaxPagesToClean oldest dirty pages with sequential page colours
   720 	{
       
   721 	// select up to iPagesToClean oldest dirty pages with sequential page colours
   646 	
   722 	
   647 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   723 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   648 
   724 
   649 	TSequentialColourSelector<SPageInfo, KMaxPagesToClean> selector;
   725 	TSequentialColourSelector<SPageInfo, KMaxPagesToClean> selector(iPagesToClean);
   650 
   726 
   651 	SDblQueLink* link = iOldestDirtyList.Last();
   727 	SDblQueLink* link = iOldestDirtyList.Last();
   652 	while (link != &iOldestDirtyList.iA)
   728 	while (link != &iOldestDirtyList.iA)
   653 		{
   729 		{
   654 		SPageInfo* pi = SPageInfo::FromLink(link);
   730 		SPageInfo* pi = SPageInfo::FromLink(link);
   666 		}
   742 		}
   667 	
   743 	
   668 	return selector.FindLongestRun(aPageInfosOut);
   744 	return selector.FindLongestRun(aPageInfosOut);
   669 	}
   745 	}
   670 
   746 
   671 #else
   747 
   672 
   748 TInt DPager::SelectOldestPagesToClean(SPageInfo** aPageInfosOut)
   673 TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut)
   749 	{
   674 	{
   750 	// select up to iPagesToClean oldest dirty pages
   675 	// no page colouring restrictions, so just take up to KMaxPagesToClean oldest dirty pages
       
   676 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   751 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   677 	TInt pageCount = 0;
   752 	TUint pageCount = 0;
   678 	SDblQueLink* link = iOldestDirtyList.Last();
   753 	SDblQueLink* link = iOldestDirtyList.Last();
   679 	while (link != &iOldestDirtyList.iA && pageCount < KMaxPagesToClean)
   754 	while (link != &iOldestDirtyList.iA && pageCount < iPagesToClean)
   680 		{
   755 		{
   681 		SPageInfo* pi = SPageInfo::FromLink(link);
   756 		SPageInfo* pi = SPageInfo::FromLink(link);
   682 		if (!pi->IsWritable())
   757 		if (!pi->IsWritable())
   683 			{
   758 			{
   684 			// the page may be in the process of being restricted, stolen or decommitted, but don't
   759 			// the page may be in the process of being restricted, stolen or decommitted, but don't
   689 		link = link->iPrev;
   764 		link = link->iPrev;
   690 		}
   765 		}
   691 	return pageCount;
   766 	return pageCount;
   692 	}
   767 	}
   693 
   768 
   694 #endif
       
   695 
       
   696 
   769 
   697 TInt DPager::CleanSomePages(TBool aBackground)
   770 TInt DPager::CleanSomePages(TBool aBackground)
   698 	{
   771 	{
       
   772 	TRACE(("DPager::CleanSomePages"));
       
   773 	
   699 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   774 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   700 	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
   775 	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
   701 	// ram alloc lock may or may not be held
   776 	// ram alloc lock may or may not be held
   702 
   777 
   703 	SPageInfo* pageInfos[KMaxPagesToClean];
   778 	SPageInfo* pageInfos[KMaxPagesToClean];
   704 	TInt pageCount = SelectPagesToClean(&pageInfos[0]);
   779 	TInt pageCount;
       
   780 	if (iCleanInSequence)
       
   781 		pageCount = SelectSequentialPagesToClean(&pageInfos[0]);
       
   782 	else
       
   783 		pageCount = SelectOldestPagesToClean(&pageInfos[0]);
   705 	
   784 	
   706 	if (pageCount == 0)
   785 	if (pageCount == 0)
       
   786 		{
       
    787 		TRACE2(("DPager::CleanSomePages no pages to clean"));
       
   788 		TRACE2(("  page counts %d, %d, %d, %d",
       
   789 				iYoungCount, iOldCount, iOldestCleanCount, iOldestDirtyCount));
   707 		return 0;
   790 		return 0;
       
   791 		}
   708 	
   792 	
   709 	TheDataPagedMemoryManager->CleanPages(pageCount, pageInfos, aBackground);
   793 	TheDataPagedMemoryManager->CleanPages(pageCount, pageInfos, aBackground);
   710 
   794 
   711 	for (TInt i = 0 ; i < pageCount ; ++i)
   795 	for (TInt i = 0 ; i < pageCount ; ++i)
   712 		{
   796 		{
   722 			++iOldestCleanCount;
   806 			++iOldestCleanCount;
   723 			pi->SetPagedState(SPageInfo::EPagedOldestClean);
   807 			pi->SetPagedState(SPageInfo::EPagedOldestClean);
   724 			}
   808 			}
   725 		}
   809 		}
   726 
   810 
       
   811 	TRACE2(("DPager::CleanSomePages cleaned %d pages", pageCount));
       
   812 
   727 	return pageCount;
   813 	return pageCount;
   728 	}
   814 	}
   729 
   815 
   730 
   816 
   731 TBool DPager::HasPagesToClean()
   817 TBool DPager::HasPagesToClean()
   735 	}
   821 	}
   736 
   822 
   737 
   823 
   738 TInt DPager::RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
   824 TInt DPager::RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
   739 	{
   825 	{
   740 	TRACE(("DPager::RestrictPage(0x%08x,%d)",aPageInfo,aRestriction));
       
   741 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   826 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   742 
   827 
   743 	TInt r;
   828 	TInt r;
   744 	if(aPageInfo->Type()==SPageInfo::EUnused)
   829 	if(aPageInfo->Type()==SPageInfo::EUnused)
   745 		{
   830 		{
   761 		MmuLock::Unlock();
   846 		MmuLock::Unlock();
   762 		memory->AsyncClose();
   847 		memory->AsyncClose();
   763 		MmuLock::Lock();
   848 		MmuLock::Lock();
   764 		}
   849 		}
   765 
   850 
   766 	TRACE(("DPager::RestrictPage returns %d",r));
       
   767 	return r;
   851 	return r;
   768 	}
   852 	}
   769 
   853 
   770 
   854 
   771 TInt DPager::StealPage(SPageInfo* aPageInfo)
   855 TInt DPager::StealPage(SPageInfo* aPageInfo)
   812 	}
   896 	}
   813 
   897 
   814 
   898 
   815 TInt DPager::DiscardAndAllocPage(SPageInfo* aPageInfo, TZonePageType aPageType)
   899 TInt DPager::DiscardAndAllocPage(SPageInfo* aPageInfo, TZonePageType aPageType)
   816 	{
   900 	{
   817 	TInt r = DiscardPage(aPageInfo, KRamZoneInvalidId, EFalse);
   901 	TInt r = DiscardPage(aPageInfo, KRamZoneInvalidId, M::EMoveDisMoveDirty);
   818 	if (r == KErrNone)
   902 	if (r == KErrNone)
   819 		{
   903 		{
   820 		TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType);
   904 		TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType);
   821 		}
   905 		}
   822 	// Flash the ram alloc lock as we may have had to write a page out to swap.
       
   823 	RamAllocLock::Unlock();
       
   824 	RamAllocLock::Lock();
       
   825 	return r;
   906 	return r;
   826 	}
   907 	}
   827 
   908 
   828 
   909 
   829 static TBool DiscardCanStealPage(SPageInfo* aOldPageInfo, TBool aBlockRest)
   910 static TBool DiscardCanStealPage(SPageInfo* aOldPageInfo, TBool aMoveDirty)
   830 	{
   911 	{
   831  	// If the page is pinned or if the page is dirty and a general defrag is being performed then
   912  	// If the page is pinned or if the page is dirty and a general defrag is being performed then
   832 	// don't attempt to steal it
   913 	// don't attempt to steal it
   833 	return aOldPageInfo->Type() == SPageInfo::EUnused ||
   914 	return aOldPageInfo->Type() == SPageInfo::EUnused ||
   834 		(aOldPageInfo->PagedState() != SPageInfo::EPagedPinned && (!aBlockRest || !aOldPageInfo->IsDirty()));	
   915 		(aOldPageInfo->PagedState() != SPageInfo::EPagedPinned && (!aMoveDirty || !aOldPageInfo->IsDirty()));	
   835 	}
   916 	}
   836 
   917 
   837 
   918 
   838 TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest)
   919 TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TUint aMoveDisFlags)
   839 	{
   920 	{
   840 	// todo: assert MmuLock not released
   921 	// todo: assert MmuLock not released
   841 	
   922 	
   842 	TRACE(("> DPager::DiscardPage %08x", aOldPageInfo));
   923 	TRACE(("> DPager::DiscardPage %08x", aOldPageInfo));
   843 	
   924 	
   844 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   925 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   845 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   926 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   846 
   927 	TBool moveDirty = (aMoveDisFlags & M::EMoveDisMoveDirty) != 0;
   847 	if (!DiscardCanStealPage(aOldPageInfo, aBlockRest))
   928 	TBool blockRest = (aMoveDisFlags & M::EMoveDisBlockRest) != 0;
   848 		{
   929 
       
   930 	if (!DiscardCanStealPage(aOldPageInfo, moveDirty))
       
   931 		{
       
   932 		// Page must be managed if it is pinned or dirty.
       
   933 		__NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged);
   849 		// The page is pinned or is dirty and this is a general defrag so move the page.
   934 		// The page is pinned or is dirty and this is a general defrag so move the page.
   850 		DMemoryObject* memory = aOldPageInfo->Owner();
   935 		DMemoryObject* memory = aOldPageInfo->Owner();
   851 		// Page must be managed if it is pinned or dirty.
   936 		memory->Open();
   852 		__NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged);
       
   853 		__NK_ASSERT_DEBUG(memory);
       
   854 		MmuLock::Unlock();
       
   855 		TPhysAddr newAddr;
   937 		TPhysAddr newAddr;
   856 		TRACE2(("DPager::DiscardPage delegating pinned/dirty page to manager"));
   938 		TRACE2(("DPager::DiscardPage delegating pinned/dirty page to manager"));
   857 		TInt r = memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest);
   939 		TInt r = memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, blockRest);
   858 		TRACE(("< DPager::DiscardPage %d", r));
   940 		TRACE(("< DPager::DiscardPage %d", r));
       
   941 		memory->AsyncClose();
   859 		return r;
   942 		return r;
   860 		}
   943 		}
   861 
   944 
   862 	TInt r = KErrNone;
   945 	TInt r = KErrNone;
   863 	SPageInfo* newPageInfo = NULL;
   946 	SPageInfo* newPageInfo = NULL;
   873 
   956 
   874 		if (needNewPage)
   957 		if (needNewPage)
   875 			{
   958 			{
   876 			// Allocate a new page for the live list as it has reached its minimum size.
   959 			// Allocate a new page for the live list as it has reached its minimum size.
   877 			TUint flags = EMemAttNormalCached | Mmu::EAllocNoWipe;
   960 			TUint flags = EMemAttNormalCached | Mmu::EAllocNoWipe;
   878 			newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)flags, aBlockZoneId, aBlockRest);
   961 			newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)flags, aBlockZoneId, blockRest);
   879 			if (!newPageInfo)
   962 			if (!newPageInfo)
   880 				{
   963 				{
   881 				TRACE(("< DPager::DiscardPage KErrNoMemory"));
   964 				TRACE(("< DPager::DiscardPage KErrNoMemory"));
   882 				r = KErrNoMemory;
   965 				r = KErrNoMemory;
   883 				MmuLock::Lock();
   966 				MmuLock::Lock();
   892 			havePageCleaningLock = ETrue;
   975 			havePageCleaningLock = ETrue;
   893 			}
   976 			}
   894 
   977 
   895 		// Re-acquire the mmulock and re-check that the page is not pinned or dirty.
   978 		// Re-acquire the mmulock and re-check that the page is not pinned or dirty.
   896 		MmuLock::Lock();
   979 		MmuLock::Lock();
   897 		if (!DiscardCanStealPage(aOldPageInfo, aBlockRest))
   980 		if (!DiscardCanStealPage(aOldPageInfo, moveDirty))
   898 			{
   981 			{
   899 			// Page is now pinned or dirty so give up as it is in use.
   982 			// Page is now pinned or dirty so give up as it is in use.
   900 			r = KErrInUse;
   983 			r = KErrInUse;
   901 			break;
   984 			break;
   902 			}
   985 			}
   950 		// else can touch it.
  1033 		// else can touch it.
   951 		if (iNumberOfFreePages == 0)
  1034 		if (iNumberOfFreePages == 0)
   952 			AddAsFreePage(newPageInfo);
  1035 			AddAsFreePage(newPageInfo);
   953 		else
  1036 		else
   954 			ReturnPageToSystem(*newPageInfo);   // temporarily releases MmuLock
  1037 			ReturnPageToSystem(*newPageInfo);   // temporarily releases MmuLock
   955 		}
  1038 		}	
       
  1039 	MmuLock::Unlock();
   956 
  1040 
   957 	if (havePageCleaningLock)
  1041 	if (havePageCleaningLock)
   958 		{
  1042 		{
   959 		// Release the page cleaning mutex
  1043 		// Release the page cleaning mutex
   960 		MmuLock::Unlock();
       
   961 		PageCleaningLock::Unlock();
  1044 		PageCleaningLock::Unlock();
   962 		MmuLock::Lock();
  1045 		}
   963 		}	
  1046 
   964 	
       
   965 	MmuLock::Unlock();
       
   966 	TRACE(("< DPager::DiscardPage returns %d", r));
  1047 	TRACE(("< DPager::DiscardPage returns %d", r));
   967 	return r;	
  1048 	return r;	
   968 	}
  1049 	}
   969 
  1050 
   970 
  1051 
  1003 
  1084 
  1004 	return SPageInfo::FromPhysAddr(pagePhys);
  1085 	return SPageInfo::FromPhysAddr(pagePhys);
  1005 	}
  1086 	}
  1006 
  1087 
  1007 
  1088 
  1008 void DPager::ReturnPageToSystem()
  1089 TBool DPager::TryReturnOldestPageToSystem()
  1009 	{
  1090 	{
  1010 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
  1091 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
  1011 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1092 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1012 
  1093 	__NK_ASSERT_DEBUG(iNumberOfFreePages>0);
  1013 	ReturnPageToSystem(*StealOldestPage());
  1094 
       
  1095 	SPageInfo* pageInfo = StealOrAllocPage(EFalse, (Mmu::TRamAllocFlags)0);
       
  1096 	
       
  1097 	// StealOrAllocPage may have released the MmuLock, so check there are still enough pages
       
  1098 	// to remove one from the live list
       
  1099 	if (iNumberOfFreePages>0)
       
  1100 		{
       
  1101 		ReturnPageToSystem(*pageInfo);
       
  1102 		return ETrue;
       
  1103 		}
       
  1104 	else
       
  1105 		{
       
  1106 		AddAsFreePage(pageInfo);
       
  1107 		return EFalse;
       
  1108 		}
       
  1109 	}
       
  1110 
       
  1111 
       
  1112 TUint DPager::AllowAddFreePages(SPageInfo*& aPageInfo, TUint aNumPages)
       
  1113 	{
       
  1114 	if (iMinimumPageCount + iNumberOfFreePages == iMaximumPageCount)
       
  1115 		{// The paging cache is already at the maximum size so steal a page
       
  1116 		// so it can be returned to the system if required.
       
  1117 		aPageInfo = StealOrAllocPage(EFalse, (Mmu::TRamAllocFlags)0);
       
  1118 		__NK_ASSERT_DEBUG(aPageInfo->PagedState() == SPageInfo::EUnpaged);
       
  1119 		return 1;
       
  1120 		}
       
  1121 	// The paging cache is not at its maximum so determine how many can be added to
       
  1122 	// the paging cache without it growing past its maximum.
       
  1123 	aPageInfo = NULL;
       
  1124 	__NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages < iMaximumPageCount);
       
  1125 	if (iMinimumPageCount + iNumberOfFreePages + aNumPages > iMaximumPageCount)
       
  1126 		{
       
  1127 		return iMaximumPageCount - (iMinimumPageCount + iNumberOfFreePages);
       
  1128 		}
       
  1129 	else
       
  1130 		return aNumPages;
       
  1131 	}
       
  1132 
       
  1133 
       
  1134 void DPager::AllowAddFreePage(SPageInfo*& aPageInfo)
       
  1135 	{
       
  1136 	if (iMinimumPageCount + iNumberOfFreePages == iMaximumPageCount)
       
  1137 		{// The paging cache is already at the maximum size so steal a page
       
  1138 		// so it can be returned to the system if required.
       
  1139 		aPageInfo = StealOrAllocPage(EFalse, (Mmu::TRamAllocFlags)0);
       
  1140 		__NK_ASSERT_DEBUG(aPageInfo->PagedState() == SPageInfo::EUnpaged);
       
  1141 		return;
       
  1142 		}
       
  1143 	aPageInfo = NULL;
  1014 	}
  1144 	}
  1015 
  1145 
  1016 
  1146 
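A sketch of the clamping arithmetic in AllowAddFreePages() above, simplified in that the real routine also steals a page (returned via aPageInfo) when the cache is already at its maximum:

// How many of aNumPages may be added without pushing
// iMinimumPageCount + iNumberOfFreePages past iMaximumPageCount.
TUint AllowedToAddSketch(TUint aMin, TUint aFree, TUint aMax, TUint aNumPages)
	{
	if (aMin + aFree == aMax)
		return 1;   // at the limit: one page is stolen so one can take its place
	TUint headroom = aMax - (aMin + aFree);
	return aNumPages <= headroom ? aNumPages : headroom;
	}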
  1017 void DPager::ReturnPageToSystem(SPageInfo& aPageInfo)
  1147 void DPager::ReturnPageToSystem(SPageInfo& aPageInfo)
  1018 	{
  1148 	{
  1037 	}
  1167 	}
  1038 
  1168 
  1039 
  1169 
  1040 SPageInfo* DPager::PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags)
  1170 SPageInfo* DPager::PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags)
  1041 	{
  1171 	{
  1042 	TBool pageCleaningLockHeld = EFalse;
  1172 	// ram alloc mutex may or may not be held
  1043 	SPageInfo* pageInfo;
  1173 	__NK_ASSERT_DEBUG(!MmuLock::IsHeld());
  1044 	TPhysAddr pagePhys;
       
  1045 	TInt r = KErrGeneral;
       
  1046 	
  1174 	
  1047 	RamAllocLock::Lock();
  1175 	RamAllocLock::Lock();
  1048 	MmuLock::Lock();
       
  1049 
       
  1050 find_a_page:
       
  1051 	// try getting a free page from our live list...
       
  1052 	if (iOldestCleanCount)
       
  1053 		{
       
  1054 		pageInfo = SPageInfo::FromLink(iOldestCleanList.Last());
       
  1055 		if(pageInfo->Type()==SPageInfo::EUnused)
       
  1056 			goto try_steal_oldest_page;
       
  1057 		}
       
  1058 
       
  1059 	// try getting a free page from the system pool...
       
  1060 	if(!HaveMaximumPages())
       
  1061 		{
       
  1062 		MmuLock::Unlock();
       
  1063 		pageInfo = GetPageFromSystem(aAllocFlags);
       
  1064 		if(pageInfo)
       
  1065 			goto done;
       
  1066 		MmuLock::Lock();
       
  1067 		}
       
  1068 
       
  1069 	// try stealing a clean page...
       
  1070 	if (iOldestCleanCount)
       
  1071 		goto try_steal_oldest_page;
       
  1072 
       
  1073 	// see if we can clean multiple dirty pages in one go...
       
  1074 	if (KMaxPagesToClean > 1 && iOldestDirtyCount > 1)
       
  1075 		{
       
  1076 		// if we don't hold the page cleaning mutex then temporarily release ram alloc mutex and
       
  1077 		// acquire page cleaning mutex; if we hold it already just proceed
       
  1078 		if (!pageCleaningLockHeld)
       
  1079 			{
       
  1080 			MmuLock::Unlock();
       
  1081 			RamAllocLock::Unlock();
       
  1082 			PageCleaningLock::Lock();			
       
  1083 			MmuLock::Lock();
       
  1084 			}
       
  1085 		
       
  1086 		// there may be clean pages now if we've waited on the page cleaning mutex, if so don't
       
  1087 		// bother cleaning but just restart
       
  1088 		if (iOldestCleanCount == 0)
       
  1089 			CleanSomePages(EFalse);
       
  1090 		
       
  1091 		if (!pageCleaningLockHeld)
       
  1092 			{
       
  1093 			MmuLock::Unlock();
       
  1094 			PageCleaningLock::Unlock();			
       
  1095 			RamAllocLock::Lock();
       
  1096 			MmuLock::Lock();
       
  1097 			}
       
  1098 		
       
  1099 		if (iOldestCleanCount > 0)
       
  1100 			goto find_a_page;
       
  1101 		}
       
  1102 
       
  1103 	// as a last resort, steal a page from the live list...
       
  1104 	
  1176 	
  1105 try_steal_oldest_page:
  1177 	MmuLock::Lock();	
  1106 	__NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount);
  1178 	SPageInfo* pageInfo = StealOrAllocPage(ETrue, aAllocFlags);
  1107 	r = TryStealOldestPage(pageInfo);
  1179 	TBool wasAllocated = pageInfo->Type() == SPageInfo::EUnknown;
  1108 	// if this fails we restart whole process
       
  1109 	if (r < KErrNone)
       
  1110 		goto find_a_page;
       
  1111 
       
  1112 	// if we need to clean, acquire page cleaning mutex for life of this function
       
  1113 	if (r == 1)
       
  1114 		{
       
  1115 		__NK_ASSERT_ALWAYS(!pageCleaningLockHeld);
       
  1116 		MmuLock::Unlock();
       
  1117 		PageCleaningLock::Lock();
       
  1118 		MmuLock::Lock();
       
  1119 		pageCleaningLockHeld = ETrue;
       
  1120 		goto find_a_page;		
       
  1121 		}
       
  1122 
       
  1123 	// otherwise we're done!
       
  1124 	__NK_ASSERT_DEBUG(r == KErrNone);
       
  1125 	MmuLock::Unlock();
  1180 	MmuLock::Unlock();
  1126 
  1181 
  1127 	// make page state same as a freshly allocated page...
  1182 	if (!wasAllocated)
  1128 	pagePhys = pageInfo->PhysAddr();
  1183 		{
  1129 	TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags);
  1184 		// make page state same as a freshly allocated page...
  1130 
  1185 		TPhysAddr pagePhys = pageInfo->PhysAddr();
  1131 done:
  1186 		TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags);
  1132 	if (pageCleaningLockHeld)
  1187 		}
  1133 		PageCleaningLock::Unlock();
  1188 
  1134 	RamAllocLock::Unlock();
  1189 	RamAllocLock::Unlock();
  1135 
  1190 
  1136 	return pageInfo;
  1191 	return pageInfo;
  1137 	}
  1192 	}
  1138 
  1193 
  1144 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
  1199 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
  1145 
  1200 
  1146 	MmuLock::Lock();
  1201 	MmuLock::Lock();
  1147 	while(aNumPages>0 && (TInt)NumberOfFreePages()>=aNumPages)
  1202 	while(aNumPages>0 && (TInt)NumberOfFreePages()>=aNumPages)
  1148 		{
  1203 		{
  1149 		ReturnPageToSystem();
  1204 		if (TryReturnOldestPageToSystem())
  1150 		--aNumPages;
  1205 			--aNumPages;
  1151 		}
  1206 		}
  1152 	MmuLock::Unlock();
  1207 	MmuLock::Unlock();
  1153 
  1208 
  1154 	TRACE(("DPager::GetFreePages returns %d",!aNumPages));
  1209 	TRACE(("DPager::GetFreePages returns %d",!aNumPages));
  1155 	return !aNumPages;
  1210 	return !aNumPages;
  1164 	MmuLock::Lock();
  1219 	MmuLock::Lock();
  1165 
  1220 
  1166 	TPhysAddr* end = aPages+aCount;
  1221 	TPhysAddr* end = aPages+aCount;
  1167 	while(aPages<end)
  1222 	while(aPages<end)
  1168 		{
  1223 		{
       
  1224 		// Steal a page from the paging cache in case we need to return one to the system.
       
  1225 		// This may release the ram alloc lock.
       
  1226 		SPageInfo* pageInfo;
       
  1227 		AllowAddFreePage(pageInfo);
       
  1228 
  1169 		TPhysAddr pagePhys = *aPages++;
  1229 		TPhysAddr pagePhys = *aPages++;
  1170 		if(RPageArray::State(pagePhys)!=RPageArray::ECommitted)
  1230 		if(RPageArray::State(pagePhys)!=RPageArray::ECommitted)
       
  1231 			{
       
  1232 			if (pageInfo)
       
  1233 				AddAsFreePage(pageInfo);
  1171 			continue; // page is not present
  1234 			continue; // page is not present
       
  1235 			}
  1172 
  1236 
  1173 #ifdef _DEBUG
  1237 #ifdef _DEBUG
  1174 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys&~KPageMask);
  1238 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys&~KPageMask);
  1175 		__NK_ASSERT_DEBUG(pi);
  1239 		__NK_ASSERT_DEBUG(pi);
  1176 #else
  1240 #else
  1189 
  1253 
  1190 		case SPageInfo::EPagedYoung:
  1254 		case SPageInfo::EPagedYoung:
  1191 		case SPageInfo::EPagedOld:
  1255 		case SPageInfo::EPagedOld:
  1192 		case SPageInfo::EPagedOldestDirty:
  1256 		case SPageInfo::EPagedOldestDirty:
  1193 		case SPageInfo::EPagedOldestClean:
  1257 		case SPageInfo::EPagedOldestClean:
       
  1258 			if (pageInfo)
       
  1259 				AddAsFreePage(pageInfo);
   1194 			continue; // discard has already been allowed
   1260 			continue; // discard has already been allowed
  1195 
  1261 
  1196 		case SPageInfo::EPagedPinned:
  1262 		case SPageInfo::EPagedPinned:
  1197 			__NK_ASSERT_DEBUG(0);
  1263 			__NK_ASSERT_DEBUG(0);
  1198 		default:
  1264 		default:
  1199 			__NK_ASSERT_DEBUG(0);
  1265 			__NK_ASSERT_DEBUG(0);
       
  1266 			if (pageInfo)
       
  1267 				AddAsFreePage(pageInfo);
  1200 			continue;
  1268 			continue;
  1201 			}
  1269 			}
  1202 
  1270 
  1203 		// put page on live list...
  1271 		// put page on live list and free the stolen page...
  1204 		AddAsYoungestPage(pi);
  1272 		AddAsYoungestPage(pi);
  1205 		++iNumberOfFreePages;
  1273 		++iNumberOfFreePages;
  1206 
  1274 		if (pageInfo)
       
  1275 			ReturnPageToSystem(*pageInfo);
  1207 		Event(EEventPageDonate,pi);
  1276 		Event(EEventPageDonate,pi);
  1208 
  1277 
  1209 		// re-balance live list...
  1278 		// re-balance live list...
  1210 		RemoveExcessPages();
       
  1211 		BalanceAges();
  1279 		BalanceAges();
  1212 		}
  1280 		}
  1213 
  1281 
       
  1282 	__NK_ASSERT_DEBUG((iMinimumPageCount + iNumberOfFreePages) <= iMaximumPageCount);
  1214 	MmuLock::Unlock();
  1283 	MmuLock::Unlock();
  1215 	RamAllocLock::Unlock();
  1284 	RamAllocLock::Unlock();
  1216 	}
  1285 	}
  1217 
  1286 
  1218 
  1287 
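A toy model of the accounting enforced by the new assert above: a donation grows iNumberOfFreePages, and when the cache is already full a page is stolen first, so min + free <= max holds throughout. All counter values are invented for illustration:

// Stand-ins for iMinimumPageCount, iNumberOfFreePages, iMaximumPageCount.
TUint minCount = 64, freeCount = 191, maxCount = 256;

void DonateOnePageSketch()
	{
	// at the limit, steal one page first so the donated page can take its place
	TBool returnOne = (minCount + freeCount == maxCount);
	++freeCount;        // the donated page joins the live list
	if (returnOne)
		--freeCount;    // the stolen page is handed back to the system
	// invariant preserved: minCount + freeCount <= maxCount
	}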
  1260 			__NK_ASSERT_DEBUG(0);
  1329 			__NK_ASSERT_DEBUG(0);
  1261 			break;
  1330 			break;
  1262 			}
  1331 			}
  1263 
  1332 
  1264 		// check paging list has enough pages before we remove one...
  1333 		// check paging list has enough pages before we remove one...
  1265 		if(iNumberOfFreePages<1)
  1334 		if(!iNumberOfFreePages)
  1266 			{
  1335 			{
  1267 			// need more pages so get a page from the system...
  1336 			// need more pages so get a page from the system...
  1268 			if(!TryGrowLiveList())
  1337 			if(!TryGrowLiveList())
  1269 				{
  1338 				{
  1270 				// out of memory...
  1339 				// out of memory...
  1296 		BalanceAges();
  1365 		BalanceAges();
  1297 		}
  1366 		}
  1298 
  1367 
  1299 	// we may have added a spare free page to the live list without removing one,
  1368 	// we may have added a spare free page to the live list without removing one,
  1300 	// this could cause us to have too many pages, so deal with this...
  1369 	// this could cause us to have too many pages, so deal with this...
       
  1370 
       
  1371 	// If there are too many pages they should all be unused free pages otherwise 
       
  1372 	// the ram alloc lock may be released by RemoveExcessPages().
       
  1373 	__NK_ASSERT_DEBUG(	!HaveTooManyPages() ||
       
  1374 						(iMinimumPageCount + iNumberOfFreePages - iMaximumPageCount
       
  1375 						<= iOldestCleanCount));
  1301 	RemoveExcessPages();
  1376 	RemoveExcessPages();
  1302 
  1377 
       
  1378 	__NK_ASSERT_DEBUG((iMinimumPageCount + iNumberOfFreePages) <= iMaximumPageCount);
  1303 	MmuLock::Unlock();
  1379 	MmuLock::Unlock();
  1304 	RamAllocLock::Unlock();
  1380 	RamAllocLock::Unlock();
  1305 	return r;
  1381 	return r;
  1306 	}
  1382 	}
  1307 
  1383 
  1331 
  1407 
  1332 
  1408 
  1333 void DPager::BalanceAges()
  1409 void DPager::BalanceAges()
  1334 	{
  1410 	{
  1335 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1411 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1336 	TBool restrictPage = EFalse;
  1412 	TBool retry;
  1337 	SPageInfo* pageInfo = NULL;
  1413 	do
  1338 	TUint oldestCount = iOldestCleanCount + iOldestDirtyCount;
  1414 		{
  1339 	if((iOldCount + oldestCount) * iYoungOldRatio < iYoungCount)
  1415 		retry = EFalse;
  1340 		{
  1416 		TBool restrictPage = EFalse;
  1341 		// Need more old pages so make one young page into an old page...
  1417 		SPageInfo* pageInfo = NULL;
  1342 		__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
  1418 		TUint oldestCount = iOldestCleanCount + iOldestDirtyCount;
  1343 		__NK_ASSERT_DEBUG(iYoungCount);
  1419 		if((iOldCount + oldestCount) * iYoungOldRatio < iYoungCount)
  1344 		SDblQueLink* link = iYoungList.Last()->Deque();
  1420 			{
  1345 		--iYoungCount;
  1421 			// Need more old pages so make one young page into an old page...
  1346 
  1422 			__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
  1347 		pageInfo = SPageInfo::FromLink(link);
  1423 			__NK_ASSERT_DEBUG(iYoungCount);
  1348 		pageInfo->SetPagedState(SPageInfo::EPagedOld);
  1424 			SDblQueLink* link = iYoungList.Last()->Deque();
  1349 
  1425 			--iYoungCount;
  1350 		iOldList.AddHead(link);
  1426 
  1351 		++iOldCount;
  1427 			pageInfo = SPageInfo::FromLink(link);
  1352 
  1428 			pageInfo->SetPagedState(SPageInfo::EPagedOld);
  1353 		Event(EEventPageAged,pageInfo);
  1429 
  1354 		// Delay restricting the page until it is safe to release the MmuLock.
  1430 			iOldList.AddHead(link);
  1355 		restrictPage = ETrue;
  1431 			++iOldCount;
  1356 		}
  1432 
  1357 
  1433 			Event(EEventPageAged,pageInfo);
  1358 	// Check we have enough oldest pages.
  1434 			// Delay restricting the page until it is safe to release the MmuLock.
  1359 	if (oldestCount < KMaxOldestPages &&
  1435 			restrictPage = ETrue;
  1360 		oldestCount * iOldOldestRatio < iOldCount)
  1436 			}
  1361 		{
  1437 
  1362 		__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
  1438 		// Check we have enough oldest pages.
  1363 		__NK_ASSERT_DEBUG(iOldCount);
  1439 		if (oldestCount < iMaxOldestPages &&
  1364 		SDblQueLink* link = iOldList.Last()->Deque();
  1440 			oldestCount * iOldOldestRatio < iOldCount)
  1365 		--iOldCount;
  1441 			{
  1366 
  1442 			__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
  1367 		SPageInfo* oldestPageInfo = SPageInfo::FromLink(link);
  1443 			__NK_ASSERT_DEBUG(iOldCount);
  1368 		if (oldestPageInfo->IsDirty())
  1444 			SDblQueLink* link = iOldList.Last()->Deque();
  1369 			{
  1445 			--iOldCount;
  1370 			oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestDirty);
  1446 
  1371 			iOldestDirtyList.AddHead(link);
  1447 			SPageInfo* oldestPageInfo = SPageInfo::FromLink(link);
  1372 			++iOldestDirtyCount;
  1448 			if (oldestPageInfo->IsDirty())
  1373 			PageCleaner::NotifyPagesToClean();
  1449 				{
  1374 			Event(EEventPageAgedDirty,oldestPageInfo);
  1450 				oldestPageInfo->SetOldestPage(SPageInfo::EPagedOldestDirty);
  1375 			}
  1451 				iOldestDirtyList.AddHead(link);
  1376 		else
  1452 				++iOldestDirtyCount;
  1377 			{
  1453 				PageCleaner::NotifyPagesToClean();
  1378 			oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
  1454 				Event(EEventPageAgedDirty,oldestPageInfo);
  1379 			iOldestCleanList.AddHead(link);
  1455 				}
  1380 			++iOldestCleanCount;
  1456 			else
  1381 			Event(EEventPageAgedClean,oldestPageInfo);
  1457 				{
  1382 			}
  1458 				oldestPageInfo->SetOldestPage(SPageInfo::EPagedOldestClean);
  1383 		}
  1459 				iOldestCleanList.AddHead(link);
  1384 
  1460 				++iOldestCleanCount;
  1385 	if (restrictPage)
  1461 				Event(EEventPageAgedClean,oldestPageInfo);
  1386 		{
  1462 				}
  1387 		// Make the recently aged old page inaccessible.  This is done last as it 
  1463 			}
  1388 		// will release the MmuLock and therefore the page counts may otherwise change.
  1464 
  1389 		RestrictPage(pageInfo,ERestrictPagesNoAccessForOldPage);
  1465 		if (restrictPage)
  1390 		}
  1466 			{
       
  1467 			// Make the recently aged old page inaccessible.  This is done last as it will release
       
  1468 			// the MmuLock and therefore the page counts may otherwise change.
       
  1469 			TInt r = RestrictPage(pageInfo,ERestrictPagesNoAccessForOldPage);
       
  1470 
       
  1471 			if (r == KErrInUse)
       
  1472 				{
       
  1473 				SPageInfo::TPagedState state = pageInfo->PagedState();
       
  1474 				if (state == SPageInfo::EPagedOld ||
       
  1475 					state == SPageInfo::EPagedOldestClean ||
       
  1476 					state == SPageInfo::EPagedOldestDirty)
       
  1477 					{
       
  1478 					// The restrict operation failed, but the page was left in an old state.  This
       
  1479 					// can happen when:
       
  1480 					//  
       
  1481 					//  - pages are in the process of being pinned - the mapping will veto the
       
  1482 					//    restriction
       
   1483 					//  - pages are rejuvenated and then quickly become old again
       
  1484 					//  
       
  1485 					// In the second instance the page will be needlessly rejuvenated because we
       
  1486 					// can't tell that it has actually been restricted by another thread
       
  1487 					RemovePage(pageInfo);
       
  1488 					AddAsYoungestPage(pageInfo);
       
  1489 					retry = ETrue;
       
  1490 					}
       
  1491 				}
       
  1492 			}
       
  1493 		}
       
  1494 	while (retry);
  1391 	}
  1495 	}
  1392 
  1496 
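A worked example of the aging test above, (old + oldest) * iYoungOldRatio < young, with invented counts:

#include <cstdio>

int main()
	{
	unsigned youngOldRatio = 3;                 // target: one old-ish page per three young
	unsigned young = 13, old = 3, oldest = 1;
	// (3 + 1) * 3 = 12 < 13, so one young page is moved to the old list
	if ((old + oldest) * youngOldRatio < young)
		printf("age one young page\n");
	else
		printf("list is balanced\n");
	return 0;
	}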
  1393 
  1497 
  1394 void DPager::RemoveExcessPages()
  1498 void DPager::RemoveExcessPages()
  1395 	{
  1499 	{
  1396 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
  1500 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
  1397 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1501 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1398 	while(HaveTooManyPages())
  1502 	while(HaveTooManyPages())
  1399 		ReturnPageToSystem();
  1503 		TryReturnOldestPageToSystem();
  1400 	}
  1504 	}
  1401 
  1505 
  1402 
  1506 
  1403 void DPager::RejuvenatePageTable(TPte* aPt)
  1507 void DPager::RejuvenatePageTable(TPte* aPt)
  1404 	{
  1508 	{
  1686 	}
  1790 	}
  1687 
  1791 
  1688 
  1792 
  1689 void DPager::Pin(SPageInfo* aPageInfo, TPinArgs& aPinArgs)
  1793 void DPager::Pin(SPageInfo* aPageInfo, TPinArgs& aPinArgs)
  1690 	{
  1794 	{
       
  1795 	TRACE(("DPager::Pin %08x", aPageInfo->PhysAddr()));
       
  1796 	
  1691 	__ASSERT_CRITICAL;
  1797 	__ASSERT_CRITICAL;
  1692 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1798 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1693 	__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(1));
  1799 	__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(1));
  1694 
  1800 
  1695 	aPageInfo->IncPinCount();
  1801 	aPageInfo->IncPinCount();
  1835 			break;
  1941 			break;
  1836 			}
  1942 			}
  1837 		}
  1943 		}
  1838 	while(TryGrowLiveList());
  1944 	while(TryGrowLiveList());
  1839 
  1945 
       
  1946 	if (!ok)
       
  1947 		{// Failed to allocate enough pages so free any excess..
       
  1948 
       
  1949 		// If there are too many pages they should all be unused free pages otherwise 
       
  1950 		// the ram alloc lock may be released by RemoveExcessPages().
       
  1951 		__NK_ASSERT_DEBUG(	!HaveTooManyPages() ||
       
  1952 							(iMinimumPageCount + iNumberOfFreePages - iMaximumPageCount
       
  1953 							<= iOldestCleanCount));
       
  1954 		RemoveExcessPages();
       
  1955 		}
       
  1956 	__NK_ASSERT_DEBUG((iMinimumPageCount + iNumberOfFreePages) <= iMaximumPageCount);
  1840 	MmuLock::Unlock();
  1957 	MmuLock::Unlock();
  1841 	RamAllocLock::Unlock();
  1958 	RamAllocLock::Unlock();
  1842 	return ok;
  1959 	return ok;
  1843 	}
  1960 	}
  1844 
  1961 
  1849 	__ASSERT_CRITICAL;
  1966 	__ASSERT_CRITICAL;
  1850 
  1967 
  1851 	RamAllocLock::Lock();
  1968 	RamAllocLock::Lock();
  1852 	MmuLock::Lock();
  1969 	MmuLock::Lock();
  1853 
  1970 
  1854 	iNumberOfFreePages += aNumPages;
  1971 	while (aNumPages)
  1855 	RemoveExcessPages();
  1972 		{
  1856 
  1973 		SPageInfo* pageInfo;
       
  1974 		// This may release the ram alloc lock but it will flash the mmulock
       
  1975 		// if not all pages could be added in one go, i.e. freePages != aNumPages.
       
  1976 		TUint freePages = AllowAddFreePages(pageInfo, aNumPages);
       
  1977 		iNumberOfFreePages += freePages;
       
  1978 		aNumPages -= freePages;
       
  1979 		if (pageInfo)
       
  1980 			ReturnPageToSystem(*pageInfo);
       
  1981 		}
       
  1982 
       
  1983 	__NK_ASSERT_DEBUG((iMinimumPageCount + iNumberOfFreePages) <= iMaximumPageCount);
  1857 	MmuLock::Unlock();
  1984 	MmuLock::Unlock();
  1858 	RamAllocLock::Unlock();
  1985 	RamAllocLock::Unlock();
  1859 	}
  1986 	}
  1860 
  1987 
  1861 
  1988 
  1991 	}
  2118 	}
  1992 
  2119 
  1993 
  2120 
  1994 TInt DPager::ResizeLiveList(TUint aMinimumPageCount, TUint aMaximumPageCount)
  2121 TInt DPager::ResizeLiveList(TUint aMinimumPageCount, TUint aMaximumPageCount)
  1995 	{
  2122 	{
  1996 	TRACE(("DPager::ResizeLiveList(%d,%d) current young=%d old=%d min=%d free=%d max=%d",aMinimumPageCount,aMaximumPageCount,iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  2123 	TRACE(("DPager::ResizeLiveList(%d,%d) current: %d %d %d %d, %d %d %d",
       
  2124 		   aMinimumPageCount,aMaximumPageCount,
       
  2125 		   iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount,
       
  2126 		   iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  1997 	__NK_ASSERT_DEBUG(CacheInitialised());
  2127 	__NK_ASSERT_DEBUG(CacheInitialised());
  1998 
  2128 
  1999 	if(!aMaximumPageCount)
  2129 	if(!aMaximumPageCount)
  2000 		{
  2130 		{
  2001 		aMinimumPageCount = iInitMinimumPageCount;
  2131 		aMinimumPageCount = iInitMinimumPageCount;
  2003 		}
  2133 		}
  2004 	if (aMaximumPageCount > KAbsoluteMaxPageCount)
  2134 	if (aMaximumPageCount > KAbsoluteMaxPageCount)
  2005 		aMaximumPageCount = KAbsoluteMaxPageCount;
  2135 		aMaximumPageCount = KAbsoluteMaxPageCount;
  2006 
  2136 
  2007 	// Min must not be greater than max...
  2137 	// Min must not be greater than max...
  2008 	if(aMinimumPageCount>aMaximumPageCount)
  2138 	if(aMinimumPageCount > aMaximumPageCount)
  2009 		return KErrArgument;
  2139 		return KErrArgument;
  2010 
  2140 	
  2011 	NKern::ThreadEnterCS();
  2141 	NKern::ThreadEnterCS();
  2012 	RamAllocLock::Lock();
  2142 	RamAllocLock::Lock();
       
  2143 
       
  2144 	// We must hold this otherwise StealOrAllocPage will release the RamAllocLock while waiting for
       
   2145 	// it.  Note this method is not used in production, so it's ok to hold both locks for longer than
       
  2146 	// would otherwise happen.
       
  2147 	PageCleaningLock::Lock();  
  2013 
  2148 
  2014 	MmuLock::Lock();
  2149 	MmuLock::Lock();
  2015 
  2150 
  2016 	__NK_ASSERT_ALWAYS(iYoungOldRatio);
  2151 	__NK_ASSERT_ALWAYS(iYoungOldRatio);
  2017 
  2152 
  2018 	// Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
  2153 	// Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
  2019 	iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
  2154 	iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
  2020 						+ DPageReadRequest::ReservedPagesRequired();
  2155 						+ DPageReadRequest::ReservedPagesRequired();
  2021 	if(iMinimumPageLimit<iAbsoluteMinPageCount)
  2156 	if(iMinimumPageLimit < iAbsoluteMinPageCount)
  2022 		iMinimumPageLimit = iAbsoluteMinPageCount;
  2157 		iMinimumPageLimit = iAbsoluteMinPageCount;
  2023 	if(aMinimumPageCount<iMinimumPageLimit+iReservePageCount)
  2158 	if(aMinimumPageCount < iMinimumPageLimit + iReservePageCount)
  2024 		aMinimumPageCount = iMinimumPageLimit+iReservePageCount;
  2159 		aMinimumPageCount = iMinimumPageLimit + iReservePageCount;
  2025 	if(aMaximumPageCount<aMinimumPageCount)
  2160 	if(aMaximumPageCount < aMinimumPageCount)
  2026 		aMaximumPageCount=aMinimumPageCount;
  2161 		aMaximumPageCount = aMinimumPageCount;
  2027 
  2162 
  2028 	// Increase iMaximumPageCount?
  2163 	// Increase iMaximumPageCount?
  2029 	if(aMaximumPageCount > iMaximumPageCount)
  2164 	if(aMaximumPageCount > iMaximumPageCount)
  2030 		iMaximumPageCount = aMaximumPageCount;
  2165 		iMaximumPageCount = aMaximumPageCount;
  2031 
  2166 
  2032 	// Reduce iMinimumPageCount?
  2167 	// Reduce iMinimumPageCount?
  2033 	TInt spare = iMinimumPageCount-aMinimumPageCount;
  2168 	if(aMinimumPageCount < iMinimumPageCount)
  2034 	if(spare>0)
  2169 		{
  2035 		{
  2170 		iNumberOfFreePages += iMinimumPageCount - aMinimumPageCount;
  2036 		iMinimumPageCount -= spare;
  2171 		iMinimumPageCount = aMinimumPageCount;
  2037 		iNumberOfFreePages += spare;
       
  2038 		}
  2172 		}
  2039 
  2173 
  2040 	// Increase iMinimumPageCount?
  2174 	// Increase iMinimumPageCount?
  2041 	TInt r=KErrNone;
  2175 	TInt r = KErrNone;
  2042 	while(iMinimumPageCount<aMinimumPageCount)
  2176 	while(aMinimumPageCount > iMinimumPageCount)
  2043 		{
  2177 		{
  2044 		TUint newMin = aMinimumPageCount;
  2178 		TUint newMin = MinU(aMinimumPageCount, iMinimumPageCount + iNumberOfFreePages);
  2045 		TUint maxMin = iMinimumPageCount+iNumberOfFreePages;
  2179 		
  2046 		if(newMin>maxMin)
  2180 		if (newMin == iMinimumPageCount)
  2047 			newMin = maxMin;
  2181 			{
  2048 
  2182 			// have to add pages before we can increase minimum page count
  2049 		TUint delta = newMin-iMinimumPageCount;
  2183 			if(!TryGrowLiveList())
  2050 		if(delta)
  2184 				{
  2051 			{
  2185 				r = KErrNoMemory;
       
  2186 				break;
       
  2187 				}
       
  2188 			}
       
  2189 		else
       
  2190 			{
       
  2191 			iNumberOfFreePages -= newMin - iMinimumPageCount;
  2052 			iMinimumPageCount = newMin;
  2192 			iMinimumPageCount = newMin;
  2053 			iNumberOfFreePages -= delta;
       
  2054 			continue;
       
  2055 			}
       
  2056 
       
  2057 		if(!TryGrowLiveList())
       
  2058 			{
       
  2059 			r=KErrNoMemory;
       
  2060 			break;
       
  2061 			}
  2193 			}
  2062 		}
  2194 		}
  2063 
  2195 
  2064 	// Reduce iMaximumPageCount?
  2196 	// Reduce iMaximumPageCount?
  2065 	while(iMaximumPageCount>aMaximumPageCount)
  2197 	while(aMaximumPageCount < iMaximumPageCount)
  2066 		{
  2198 		{
  2067 		TUint newMax = aMaximumPageCount;
  2199 		TUint newMax = MaxU(aMaximumPageCount, iMinimumPageCount + iNumberOfFreePages);
  2068 		TUint minMax = iMinimumPageCount+iNumberOfFreePages;
  2200 
  2069 		if(newMax<minMax)
  2201 		if (newMax == iMaximumPageCount)
  2070 			newMax = minMax;
  2202 			{
  2071 
  2203 			// have to remove pages before we can reduce maximum page count
  2072 		TUint delta = iMaximumPageCount-newMax;
  2204 			TryReturnOldestPageToSystem();
  2073 		if(delta)
  2205 			}
       
  2206 		else
  2074 			{
  2207 			{
  2075 			iMaximumPageCount = newMax;
  2208 			iMaximumPageCount = newMax;
  2076 			continue;
  2209 			}
  2077 			}
  2210 		}
  2078 
  2211 	
  2079 		ReturnPageToSystem();
  2212 	TRACE(("DPager::ResizeLiveList end: %d %d %d %d, %d %d %d",
  2080 		}
  2213 		   iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount,
  2081 
  2214 		   iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  2082 	TRACE(("DPager::ResizeLiveList end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  2215 	
       
  2216 	__NK_ASSERT_DEBUG((iMinimumPageCount + iNumberOfFreePages) <= iMaximumPageCount);
  2083 
  2217 
  2084 #ifdef BTRACE_KERNEL_MEMORY
  2218 #ifdef BTRACE_KERNEL_MEMORY
  2085 	BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,iMinimumPageCount << KPageShift);
  2219 	BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,iMinimumPageCount << KPageShift);
  2086 #endif
  2220 #endif
  2087 
  2221 
  2088 	MmuLock::Unlock();
  2222 	MmuLock::Unlock();
  2089 
  2223 
       
  2224 	PageCleaningLock::Unlock();
  2090 	RamAllocLock::Unlock();
  2225 	RamAllocLock::Unlock();
  2091 	NKern::ThreadLeaveCS();
  2226 	NKern::ThreadLeaveCS();
  2092 
  2227 
  2093 	return r;
  2228 	return r;
       
  2229 	}
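For illustration, a minimal standalone sketch of the "increase iMinimumPageCount" loop in ResizeLiveList() above, using plain C++ stand-ins for TUint, MinU() and TryGrowLiveList() (the latter modelled by a simple page budget; this is not kernel code). Each pass either absorbs pages already counted in iNumberOfFreePages or grows the live list by one page, so the loop terminates with the target reached or KErrNoMemory:

	#include <algorithm>
	#include <cstdio>

	// Stand-ins for the kernel names; assumptions, not kernel code.
	typedef unsigned int TUint;
	const int KErrNone = 0;
	const int KErrNoMemory = -4;

	int GrowMinimum(TUint& aMinCount, TUint& aFreeCount, TUint aTarget, TUint aBudget)
		{
		while (aTarget > aMinCount)
			{
			// Absorb pages already on the free part of the live list...
			TUint newMin = std::min(aTarget, aMinCount + aFreeCount);
			if (newMin == aMinCount)
				{
				// ...otherwise grow the live list by one page, as
				// TryGrowLiveList() does, failing when no RAM is left.
				if (aBudget == 0)
					return KErrNoMemory;
				--aBudget;
				++aFreeCount;
				}
			else
				{
				aFreeCount -= newMin - aMinCount;
				aMinCount = newMin;
				}
			}
		return KErrNone;
		}

	int main()
		{
		TUint min = 64, freePages = 8;
		int r = GrowMinimum(min, freePages, 128, 100);
		std::printf("r=%d min=%u free=%u\n", r, min, freePages); // r=0 min=128 free=0
		}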
       
  2230 
       
  2231 
       
  2232 TUint RequiredOldestPages(TUint aPagesToClean, TBool aCleanInSequence)
       
  2233 	{
       
  2234 	return aCleanInSequence ? aPagesToClean * 8 : aPagesToClean;
       
  2235 	}
       
  2236 
       
  2237 
       
  2238 void DPager::SetPagesToClean(TUint aPagesToClean)
       
  2239 	{
       
  2240 	TRACE(("WDP: Pager will attempt to clean %d pages", aPagesToClean));
       
  2241 	__NK_ASSERT_ALWAYS(aPagesToClean > 0 && aPagesToClean <= KMaxPagesToClean);
       
  2242 	MmuLock::Lock();
       
  2243 	iPagesToClean = aPagesToClean;
       
  2244 	iMaxOldestPages = MaxU(KDefaultMaxOldestPages,
       
  2245 						   RequiredOldestPages(iPagesToClean, iCleanInSequence));
       
  2246 	MmuLock::Unlock();
       
  2247 	TRACE(("WDP: Maximum %d oldest pages", iMaxOldestPages));	
       
  2248 	}
       
  2249 
       
  2250 
       
  2251 TUint DPager::PagesToClean()
       
  2252 	{
       
  2253 	return iPagesToClean;
       
  2254 	}
       
  2255 
       
  2256 
       
  2257 void DPager::SetCleanInSequence(TBool aCleanInSequence)
       
  2258 	{
       
  2259 	TRACE(("WDP: Sequential page colour set to %d", aCleanInSequence));
       
  2260 	MmuLock::Lock();
       
  2261 	iCleanInSequence = aCleanInSequence;
       
  2262 	iMaxOldestPages = MaxU(KDefaultMaxOldestPages,
       
  2263 						   RequiredOldestPages(iPagesToClean, iCleanInSequence));
       
  2264 	MmuLock::Unlock();	
       
  2265 	TRACE(("WDP: Maximum %d oldest pages", iMaxOldestPages));
  2094 	}
  2266 	}
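Taken together, SetPagesToClean() and SetCleanInSequence() above keep iMaxOldestPages equal to MaxU(KDefaultMaxOldestPages, RequiredOldestPages(iPagesToClean, iCleanInSequence)). A small standalone sketch of that sizing policy (std::max stands in for MaxU; the factor of 8 comes from RequiredOldestPages()):

	#include <algorithm>
	#include <cstdio>

	const unsigned KDefaultMaxOldestPages = 32;  // as defined earlier in this file

	// Mirrors RequiredOldestPages() above: cleaning pages in media order
	// needs a deeper pool of oldest pages to select from.
	unsigned RequiredOldest(unsigned aPagesToClean, bool aCleanInSequence)
		{
		return aCleanInSequence ? aPagesToClean * 8 : aPagesToClean;
		}

	unsigned MaxOldestPages(unsigned aPagesToClean, bool aCleanInSequence)
		{
		return std::max(KDefaultMaxOldestPages,
						RequiredOldest(aPagesToClean, aCleanInSequence));
		}

	int main()
		{
		std::printf("%u\n", MaxOldestPages(4, false));  // 32: the default dominates
		std::printf("%u\n", MaxOldestPages(16, true));  // 128 = 16 * 8
		}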
  2095 
  2267 
  2096 
  2268 
  2097 // WARNING THIS METHOD MAY HOLD THE RAM ALLOC LOCK FOR EXCESSIVE PERIODS.  DON'T USE THIS IN ANY PRODUCTION CODE.
  2269 // WARNING THIS METHOD MAY HOLD THE RAM ALLOC LOCK FOR EXCESSIVE PERIODS.  DON'T USE THIS IN ANY PRODUCTION CODE.
  2098 void DPager::FlushAll()
  2270 void DPager::FlushAll()
  2143 			}
  2315 			}
  2144 		pi = piNext;
  2316 		pi = piNext;
  2145 		}
  2317 		}
  2146 	while(piMap<piMapEnd);
  2318 	while(piMap<piMapEnd);
  2147 	MmuLock::Unlock();
  2319 	MmuLock::Unlock();
       
  2320 	PageCleaningLock::Unlock();
  2148 
  2321 
  2149 	// reduce live page list to a minimum
  2322 	// reduce live page list to a minimum
  2150 	while(GetFreePages(1)) {}; 
  2323 	while(GetFreePages(1)) {}; 
  2151 
  2324 
  2152 	TRACE(("DPager::FlushAll() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  2325 	TRACE(("DPager::FlushAll() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  2153 
  2326 
  2154 	PageCleaningLock::Unlock();
       
  2155 	RamAllocLock::Unlock();
  2327 	RamAllocLock::Unlock();
  2156 	NKern::ThreadLeaveCS();
  2328 	NKern::ThreadLeaveCS();
  2157 	}
  2329 	}
  2158 
  2330 
  2159 
  2331 
  2434 		ThePager.ResetBenchmarkData((TPagingBenchmark)index);
  2606 		ThePager.ResetBenchmarkData((TPagingBenchmark)index);
  2435 		}
  2607 		}
  2436 		return KErrNone;
  2608 		return KErrNone;
  2437 #endif
  2609 #endif
  2438 
  2610 
       
  2611 	case EVMHalGetPhysicalAccessSupported:
       
  2612 		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
       
  2613 			return KErrNotSupported;
       
  2614 		return GetPhysicalAccessSupported();
       
  2615 		
       
  2616 	case EVMHalGetUsePhysicalAccess:
       
  2617 		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
       
  2618 			return KErrNotSupported;
       
  2619 		return GetUsePhysicalAccess();
       
  2620 
       
  2621 	case EVMHalSetUsePhysicalAccess:
       
  2622 		if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetUsePhysicalAccess)")))
       
  2623 			K::UnlockedPlatformSecurityPanic();
       
  2624 		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
       
  2625 			return KErrNotSupported;
       
  2626 		if ((TUint)a1 > 1)
       
  2627 			return KErrArgument;
       
  2628 		SetUsePhysicalAccess((TBool)a1);
       
  2629 		return KErrNone;
       
  2630 		
       
  2631 	case EVMHalGetPreferredDataWriteSize:
       
  2632 		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
       
  2633 			return KErrNotSupported;
       
  2634 		return GetPreferredDataWriteSize();
       
  2635 		
       
  2636 	case EVMHalGetDataWriteSize:
       
  2637 		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
       
  2638 			return KErrNotSupported;
       
  2639 		return __e32_find_ms1_32(ThePager.PagesToClean());
       
  2640 		
       
  2641 	case EVMHalSetDataWriteSize:
       
  2642 		if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetDataWriteSize)")))
       
  2643 			K::UnlockedPlatformSecurityPanic();
       
  2644 		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
       
  2645 			return KErrNotSupported;
       
  2646 		return SetDataWriteSize((TUint)a1);
       
  2647 	
  2439 	default:
  2648 	default:
  2440 		return KErrNotSupported;
  2649 		return KErrNotSupported;
  2441 		}
  2650 		}
  2442 	}
  2651 	}
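Note that EVMHalGetDataWriteSize above reports the write size as a log2 page count: __e32_find_ms1_32() yields the bit index of the most significant set bit of the pages-to-clean value. A portable sketch of that encoding (the -1 return for zero is an assumption about __e32_find_ms1_32; the kernel never passes zero here, since iPagesToClean is at least 1):

	#include <cstdio>

	// Portable stand-in for __e32_find_ms1_32: index of the most significant
	// set bit, i.e. floor(log2(x)) for x > 0; assumed -1 when no bit is set.
	int FindMs1_32(unsigned x)
		{
		int n = -1;
		while (x)
			{
			++n;
			x >>= 1;
			}
		return n;
		}

	int main()
		{
		// With 16 pages cleaned per write, the HAL reports 4 (2^4 pages).
		std::printf("%d\n", FindMs1_32(16));  // 4
		std::printf("%d\n", FindMs1_32(1));   // 0
		}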
  2443 
  2652 
  2486 //
  2695 //
  2487 // Paging request management...
  2696 // Paging request management...
  2488 //
  2697 //
  2489 
  2698 
  2490 //
  2699 //
  2491 // DPagingRequest
  2700 // DPagingRequestBase
  2492 //
  2701 //
  2493 
  2702 
  2494 DPagingRequest::DPagingRequest()
  2703 
  2495 	: iMutex(NULL), iUseRegionCount(0)
  2704 TLinAddr DPagingRequestBase::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages)
  2496 	{
       
  2497 	}
       
  2498 
       
  2499 
       
  2500 void DPagingRequest::SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
       
  2501 	{
       
  2502 	__ASSERT_SYSTEM_LOCK;
       
  2503 	__NK_ASSERT_DEBUG(iUseRegionCount == 0);
       
  2504 	__NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages);
       
  2505 	for (TUint i = 0 ; i < aCount ; ++i)
       
  2506 		{
       
  2507 		iUseRegionMemory[i] = aMemory;
       
  2508 		iUseRegionIndex[i] = aIndex + i;		
       
  2509 		}
       
  2510 	iUseRegionCount = aCount;
       
  2511 	}
       
  2512 
       
  2513 
       
  2514 void DPagingRequest::SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
       
  2515 	{
       
  2516 	__ASSERT_SYSTEM_LOCK;
       
  2517 	__NK_ASSERT_DEBUG(iUseRegionCount == 0);
       
  2518 	__NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages);
       
  2519 	for (TUint i = 0 ; i < aCount ; ++i)
       
  2520 		{
       
  2521 		iUseRegionMemory[i] = aMemory[i];
       
  2522 		iUseRegionIndex[i] = aIndex[i];
       
  2523 		}
       
  2524 	iUseRegionCount = aCount;
       
  2525 	}
       
  2526 
       
  2527 
       
  2528 void DPagingRequest::ResetUse()
       
  2529 	{
       
  2530 	__ASSERT_SYSTEM_LOCK;
       
  2531 	__NK_ASSERT_DEBUG(iUseRegionCount > 0);
       
  2532 	iUseRegionCount = 0;
       
  2533 	}
       
  2534 
       
  2535 
       
  2536 TBool DPagingRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
       
  2537 	{
       
  2538 	if (iUseRegionCount != aCount)
       
  2539 		return EFalse;
       
  2540 	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
       
  2541 		{
       
  2542 		if (iUseRegionMemory[i] != aMemory || iUseRegionIndex[i] != aIndex + i)
       
  2543 			return EFalse;
       
  2544 		}
       
  2545 	return ETrue;
       
  2546 	}
       
  2547 
       
  2548 
       
  2549 TBool DPagingRequest::CheckUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
       
  2550 	{
       
  2551 	if (iUseRegionCount != aCount)
       
  2552 		return EFalse;
       
  2553 	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
       
  2554 		{
       
  2555 		if (iUseRegionMemory[i] != aMemory[i] || iUseRegionIndex[i] != aIndex[i])
       
  2556 			return EFalse;
       
  2557 		}
       
  2558 	return ETrue;
       
  2559 	}
       
  2560 
       
  2561 
       
  2562 TBool DPagingRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
       
  2563 	{
       
  2564 	// note this could be optimised as most of the time we will be checking read/read collisions,
       
  2565 	// both of which will be contiguous
       
  2566 	__ASSERT_SYSTEM_LOCK;
       
  2567 	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
       
  2568 		{
       
  2569 		if (iUseRegionMemory[i] == aMemory &&
       
  2570 			TUint(iUseRegionIndex[i] - aIndex) < aCount)
       
  2571 			return ETrue;
       
  2572 		}
       
  2573 	return EFalse;
       
  2574 	}
       
  2575 
       
  2576 
       
  2577 TLinAddr DPagingRequest::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages)
       
  2578 	{
  2705 	{
  2579 	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
  2706 	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
  2580 	return iTempMapping.Map(aPages,aCount,aColour);
  2707 	return iTempMapping.Map(aPages,aCount,aColour);
  2581 	}
  2708 	}
  2582 
  2709 
  2583 
  2710 
  2584 void DPagingRequest::UnmapPages(TBool aIMBRequired)
  2711 void DPagingRequestBase::UnmapPages(TBool aIMBRequired)
  2585 	{
  2712 	{
  2586 	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
  2713 	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
  2587 	iTempMapping.Unmap(aIMBRequired);
  2714 	iTempMapping.Unmap(aIMBRequired);
  2588 	}
  2715 	}
  2589 
  2716 
  2590 //
       
  2591 // DPoolPagingRequest
       
  2592 //
       
  2593 
       
  2594 DPoolPagingRequest::DPoolPagingRequest(DPagingRequestPool::TGroup& aPoolGroup) :
       
  2595 	iPoolGroup(aPoolGroup)
       
  2596 	{
       
  2597 	}
       
  2598 
       
  2599 
       
  2600 void DPoolPagingRequest::Release()
       
  2601 	{
       
  2602 	NKern::LockSystem();
       
  2603 	ResetUse();
       
  2604 	Signal();
       
  2605 	}
       
  2606 
       
  2607 
       
  2608 void DPoolPagingRequest::Wait()
       
  2609 	{
       
  2610 	__ASSERT_SYSTEM_LOCK;
       
  2611 	++iUsageCount;
       
  2612 	TInt r = iMutex->Wait();
       
  2613 	__NK_ASSERT_ALWAYS(r == KErrNone);
       
  2614 	}
       
  2615 
       
  2616 
       
  2617 void DPoolPagingRequest::Signal()
       
  2618 	{
       
  2619 	__ASSERT_SYSTEM_LOCK;
       
  2620 	iPoolGroup.Signal(this);
       
  2621 	}
       
  2622 
  2717 
  2623 //
  2718 //
  2624 // DPageReadRequest
  2719 // DPageReadRequest
  2625 //
  2720 //
  2626 
  2721 
       
  2722 
  2627 TInt DPageReadRequest::iAllocNext = 0;
  2723 TInt DPageReadRequest::iAllocNext = 0;
  2628 
  2724 
       
  2725 
       
  2726 TUint DPageReadRequest::ReservedPagesRequired()
       
  2727 	{
       
  2728 	return iAllocNext*EMaxPages;
       
  2729 	}
       
  2730 
       
  2731 
  2629 DPageReadRequest::DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup) :
  2732 DPageReadRequest::DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup) :
  2630 	DPoolPagingRequest(aPoolGroup)
  2733 	iPoolGroup(aPoolGroup)
  2631 	{
  2734 	{
  2632 	// allocate space for mapping pages whilst they're being loaded...
       
  2633 	iTempMapping.Alloc(EMaxPages);
  2735 	iTempMapping.Alloc(EMaxPages);
  2634 	}
  2736 	}
       
  2737 
  2635 
  2738 
  2636 TInt DPageReadRequest::Construct()
  2739 TInt DPageReadRequest::Construct()
  2637 	{
  2740 	{
  2638 	// allocate id and mutex...
  2741 	// allocate id and mutex...
  2639 	TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
  2742 	TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
  2664 
  2767 
  2665 	return r;
  2768 	return r;
  2666 	}
  2769 	}
  2667 
  2770 
  2668 
  2771 
       
  2772 void DPageReadRequest::Release()
       
  2773 	{
       
  2774 	NKern::LockSystem();
       
  2775 	ResetUse();
       
  2776 	Signal();
       
  2777 	}
       
  2778 
       
  2779 
       
  2780 void DPageReadRequest::Wait()
       
  2781 	{
       
  2782 	__ASSERT_SYSTEM_LOCK;
       
  2783 	++iUsageCount;
       
  2784 	TInt r = iMutex->Wait();
       
  2785 	__NK_ASSERT_ALWAYS(r == KErrNone);
       
  2786 	}
       
  2787 
       
  2788 
       
  2789 void DPageReadRequest::Signal()
       
  2790 	{
       
  2791 	__ASSERT_SYSTEM_LOCK;
       
  2792 	__NK_ASSERT_DEBUG(iUsageCount > 0);
       
  2793 	if (--iUsageCount == 0)
       
  2794 		iPoolGroup.iFreeList.AddHead(&iLink);
       
  2795 	iMutex->Signal();
       
  2796 	}
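Wait() and Signal() above maintain a claim count on each read request: GetRequest() takes a request off the free list (or deliberately shares a busy one), every claimant bumps iUsageCount before waiting on the mutex, and the last Signal() returns the request to the head of the free list. A single-threaded sketch of just the counting, with a std::list standing in for the SDblQue free list and the mutex elided:

	#include <cassert>
	#include <list>

	struct Request;
	static std::list<Request*> FreeList;    // stands in for iPoolGroup.iFreeList

	struct Request
		{
		int iUsageCount = 0;

		void Wait()                         // the real code also waits on iMutex
			{
			++iUsageCount;
			}

		void Signal()                       // the real code also signals iMutex
			{
			assert(iUsageCount > 0);
			if (--iUsageCount == 0)
				FreeList.push_front(this);  // AddHead(&iLink)
			}
		};

	int main()
		{
		Request req;                        // assume GetRequest() handed it out
		req.Wait();
		req.Wait();                         // a second user piggybacks on it
		req.Signal();
		assert(FreeList.empty());           // still claimed by the first user
		req.Signal();                       // last claim released...
		assert(FreeList.front() == &req);   // ...so it rejoins the free list
		}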
       
  2797 
       
  2798 
       
  2799 void DPageReadRequest::SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
       
  2800 	{
       
  2801 	__ASSERT_SYSTEM_LOCK;
       
  2802 	__NK_ASSERT_DEBUG(aMemory != NULL && aCount <= EMaxPages);
       
  2803 	__NK_ASSERT_DEBUG(iMemory == NULL);
       
  2804 	iMemory = aMemory;
       
  2805 	iIndex = aIndex;
       
  2806 	iCount = aCount;
       
  2807 	}
       
  2808 
       
  2809 
       
  2810 void DPageReadRequest::ResetUse()
       
  2811 	{
       
  2812 	__ASSERT_SYSTEM_LOCK;
       
  2813 	__NK_ASSERT_DEBUG(iMemory != NULL);
       
  2814 	iMemory = NULL;
       
  2815 	}
       
  2816 
       
  2817 
       
  2818 TBool DPageReadRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
       
  2819 	{
       
  2820 	__ASSERT_SYSTEM_LOCK;
       
  2821 	return iMemory == aMemory && aIndex < iIndex + iCount && aIndex + aCount > iIndex;
       
  2822 	}
       
  2823 
       
  2824 
       
  2825 TBool DPageReadRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
       
  2826 	{
       
  2827 	return iMemory == aMemory && iIndex == aIndex && iCount == aCount;
       
  2828 	}
       
  2829 
       
  2830 
  2669 //
  2831 //
  2670 // DPageWriteRequest
  2832 // DPageWriteRequest
  2671 //
  2833 //
  2672 
  2834 
  2673 
  2835 
  2674 DPageWriteRequest::DPageWriteRequest()
  2836 DPageWriteRequest::DPageWriteRequest()
  2675 	{
  2837 	{
  2676 	iMutex = ThePageCleaningLock;
  2838 	iMutex = ThePageCleaningLock;
  2677 	// allocate space for mapping pages whilst they're being loaded...
  2839 	iTempMapping.Alloc(EMaxPages);
  2678 	iTempMapping.Alloc(KMaxPagesToClean);
       
  2679 	}
  2840 	}
  2680 
  2841 
  2681 
  2842 
  2682 void DPageWriteRequest::Release()
  2843 void DPageWriteRequest::Release()
  2683 	{
  2844 	{
  2684 	NKern::LockSystem();
  2845 	NKern::LockSystem();
  2685 	ResetUse();
  2846 	ResetUse();
  2686 	NKern::UnlockSystem();
  2847 	NKern::UnlockSystem();
  2687 	}
  2848 	}
  2688 
  2849 
       
  2850 
       
  2851 void DPageWriteRequest::SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
       
  2852 	{
       
  2853 	__ASSERT_SYSTEM_LOCK;
       
  2854 	__NK_ASSERT_DEBUG(iUseRegionCount == 0);
       
  2855 	__NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages);
       
  2856 	for (TUint i = 0 ; i < aCount ; ++i)
       
  2857 		{
       
  2858 		iUseRegionMemory[i] = aMemory[i];
       
  2859 		iUseRegionIndex[i] = aIndex[i];
       
  2860 		}
       
  2861 	iUseRegionCount = aCount;
       
  2862 	}
       
  2863 
       
  2864 
       
  2865 void DPageWriteRequest::ResetUse()
       
  2866 	{
       
  2867 	__ASSERT_SYSTEM_LOCK;
       
  2868 	__NK_ASSERT_DEBUG(iUseRegionCount > 0);
       
  2869 	iUseRegionCount = 0;
       
  2870 	}
       
  2871 
       
  2872 
       
  2873 TBool DPageWriteRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
       
  2874 	{
       
  2875 	if (iUseRegionCount != aCount)
       
  2876 		return EFalse;
       
  2877 	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
       
  2878 		{
       
  2879 		if (iUseRegionMemory[i] != aMemory || iUseRegionIndex[i] != aIndex + i)
       
  2880 			return EFalse;
       
  2881 		}
       
  2882 	return ETrue;
       
  2883 	}
       
  2884 
       
  2885 
       
  2886 TBool DPageWriteRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
       
  2887 	{
       
  2888 	// note this could be optimised as most of the time we will be checking read/read collisions,
       
  2889 	// both of which will be contiguous
       
  2890 	__ASSERT_SYSTEM_LOCK;
       
  2891 	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
       
  2892 		{
       
  2893 		if (iUseRegionMemory[i] == aMemory &&
       
  2894 			TUint(iUseRegionIndex[i] - aIndex) < aCount)
       
  2895 			return ETrue;
       
  2896 		}
       
  2897 	return EFalse;
       
  2898 	}
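The comparison TUint(iUseRegionIndex[i] - aIndex) < aCount in IsCollisionContiguous() above folds the two-sided test aIndex <= index && index < aIndex + aCount into one unsigned compare: if the index is below aIndex, the subtraction wraps around to a large value and fails the compare. (DPageReadRequest::IsCollisionContiguous() earlier uses the plain two-compare interval test instead, since it tracks a single contiguous region.) A minimal demonstration:

	#include <cstdio>

	// Single-compare range check, valid while start + count stays in range.
	bool InRange(unsigned aIndex, unsigned aStart, unsigned aCount)
		{
		// Equivalent to (aIndex >= aStart && aIndex < aStart + aCount):
		// when aIndex < aStart the subtraction wraps to a value >= aCount.
		return (aIndex - aStart) < aCount;
		}

	int main()
		{
		std::printf("%d\n", InRange(5, 4, 3));  // 1: 5 lies in [4,7)
		std::printf("%d\n", InRange(3, 4, 3));  // 0: wraps to 0xFFFFFFFF
		std::printf("%d\n", InRange(7, 4, 3));  // 0: one past the end
		}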
  2689 
  2899 
  2690 //
  2900 //
  2691 // DPagingRequestPool
  2901 // DPagingRequestPool
  2692 //
  2902 //
  2693 
  2903 
  2700 		DPageReadRequest* req = new DPageReadRequest(iPageReadRequests);
  2910 		DPageReadRequest* req = new DPageReadRequest(iPageReadRequests);
  2701 		__NK_ASSERT_ALWAYS(req);
  2911 		__NK_ASSERT_ALWAYS(req);
  2702 		TInt r = req->Construct();
  2912 		TInt r = req->Construct();
  2703 		__NK_ASSERT_ALWAYS(r==KErrNone);
  2913 		__NK_ASSERT_ALWAYS(r==KErrNone);
  2704 		iPageReadRequests.iRequests[i] = req;
  2914 		iPageReadRequests.iRequests[i] = req;
  2705 		iPageReadRequests.iFreeList.Add(req);
  2915 		iPageReadRequests.iFreeList.Add(&req->iLink);
  2706 		}
  2916 		}
  2707 
  2917 
  2708 	if (aWriteRequest)
  2918 	if (aWriteRequest)
  2709 		{
  2919 		{
  2710 		iPageWriteRequest = new DPageWriteRequest();
  2920 		iPageWriteRequest = new DPageWriteRequest();
  2721 
  2931 
  2722 DPageReadRequest* DPagingRequestPool::AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2932 DPageReadRequest* DPagingRequestPool::AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2723 	{
  2933 	{
  2724 	NKern::LockSystem();
  2934 	NKern::LockSystem();
  2725 
  2935 
  2726 	DPoolPagingRequest* req;
  2936 	DPageReadRequest* req;
  2727 	
  2937 	
  2728 	// check for collision with existing write
  2938 	// check for collision with existing write
  2729 	if(iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount))
  2939 	if(iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount))
  2730 		{
  2940 		{
  2731 		NKern::UnlockSystem();
  2941 		NKern::UnlockSystem();
  2782 
  2992 
  2783 
  2993 
  2784 DPagingRequestPool::TGroup::TGroup(TUint aNumRequests)
  2994 DPagingRequestPool::TGroup::TGroup(TUint aNumRequests)
  2785 	{
  2995 	{
  2786 	iNumRequests = aNumRequests;
  2996 	iNumRequests = aNumRequests;
  2787 	iRequests = new DPoolPagingRequest*[aNumRequests];
  2997 	iRequests = new DPageReadRequest*[aNumRequests];
  2788 	__NK_ASSERT_ALWAYS(iRequests);
  2998 	__NK_ASSERT_ALWAYS(iRequests);
  2789 	}
  2999 	}
  2790 
  3000 
  2791 
  3001 
  2792 DPoolPagingRequest* DPagingRequestPool::TGroup::FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  3002 DPageReadRequest* DPagingRequestPool::TGroup::FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2793 	{
  3003 	{
  2794 	__ASSERT_SYSTEM_LOCK;
  3004 	__ASSERT_SYSTEM_LOCK;
  2795 	DPoolPagingRequest** ptr = iRequests;
  3005 	DPageReadRequest** ptr = iRequests;
  2796 	DPoolPagingRequest** ptrEnd = ptr+iNumRequests;
  3006 	DPageReadRequest** ptrEnd = ptr+iNumRequests;
  2797 	while(ptr<ptrEnd)
  3007 	while(ptr<ptrEnd)
  2798 		{
  3008 		{
  2799 		DPoolPagingRequest* req = *ptr++;
  3009 		DPageReadRequest* req = *ptr++;
  2800 		if(req->IsCollisionContiguous(aMemory,aIndex,aCount))
  3010 		if(req->IsCollisionContiguous(aMemory,aIndex,aCount))
  2801 			return req;
  3011 			return req;
  2802 		}
  3012 		}
  2803 	return 0;
  3013 	return 0;
  2804 	}
  3014 	}
  2805 
  3015 
  2806 
  3016 
  2807 static TUint32 RandomSeed = 33333;
  3017 static TUint32 RandomSeed = 33333;
  2808 
  3018 
  2809 DPoolPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  3019 DPageReadRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2810 	{
  3020 	{
  2811 	__NK_ASSERT_DEBUG(iNumRequests > 0);
  3021 	__NK_ASSERT_DEBUG(iNumRequests > 0);
  2812 
  3022 
  2813 	// try using an existing request which collides with this region...
  3023 	// try using an existing request which collides with this region...
  2814 	DPoolPagingRequest* req  = FindCollisionContiguous(aMemory,aIndex,aCount);
  3024 	DPageReadRequest* req  = FindCollisionContiguous(aMemory,aIndex,aCount);
  2815 	if(!req)
  3025 	if(!req)
  2816 		{
  3026 		{
  2817 		// use a free request...
  3027 		// use a free request...
  2818 		req = (DPoolPagingRequest*)iFreeList.GetFirst();
  3028 		SDblQueLink* first = iFreeList.GetFirst();
  2819 		if(req)
  3029 		if(first)
  2820 			{
  3030 			{
  2821 			// free requests aren't being used...
  3031 			// free requests aren't being used...
  2822 			__NK_ASSERT_DEBUG(req->iUsageCount == 0);
  3032 			req = _LOFF(first, DPageReadRequest, iLink);
       
  3033 			__NK_ASSERT_DEBUG(req->ThreadsWaiting() == 0);
  2823 			}
  3034 			}
  2824 		else
  3035 		else
  2825 			{
  3036 			{
  2826 			// pick a random request...
  3037 			// pick a random request...
  2827 			RandomSeed = RandomSeed*69069+1; // next 'random' number
  3038 			RandomSeed = RandomSeed*69069+1; // next 'random' number
  2828 			TUint index = (TUint64(RandomSeed) * TUint64(iNumRequests)) >> 32;
  3039 			TUint index = (TUint64(RandomSeed) * TUint64(iNumRequests)) >> 32;
  2829 			req = iRequests[index];
  3040 			req = iRequests[index];
  2830 			__NK_ASSERT_DEBUG(req->iUsageCount > 0); // we only pick random when none are free
  3041 			__NK_ASSERT_DEBUG(req->ThreadsWaiting() > 0); // we only pick random when none are free
  2831 			}
  3042 			}
  2832 		}
  3043 		}
  2833 
  3044 
  2834 	// wait for chosen request object...
  3045 	// wait for chosen request object...
  2835 	req->Wait();
  3046 	req->Wait();
  2836 
  3047 
  2837 	return req;
  3048 	return req;
  2838 	}
       
  2839 
       
  2840 
       
  2841 void DPagingRequestPool::TGroup::Signal(DPoolPagingRequest* aRequest)
       
  2842 	{
       
  2843 	// if there are no threads waiting on the mutex then return it to the free pool...
       
  2844 	__NK_ASSERT_DEBUG(aRequest->iUsageCount > 0);
       
  2845 	if (--aRequest->iUsageCount==0)
       
  2846 		iFreeList.AddHead(aRequest);
       
  2847 
       
  2848 	aRequest->iMutex->Signal();
       
  2849 	}
  3049 	}
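The fallback path in GetRequest() above is a Knuth-style linear congruential generator (seed = seed*69069 + 1, mod 2^32), and the 64-bit multiply-and-shift scales the 32-bit value into [0, iNumRequests) without a modulo, so busy requests are shared roughly evenly. A standalone sketch of the same arithmetic:

	#include <cstdint>
	#include <cstdio>

	static uint32_t RandomSeed = 33333;     // same seed as above

	// Pseudo-random index in [0, aCount), mirroring the arithmetic in
	// DPagingRequestPool::TGroup::GetRequest().
	uint32_t NextIndex(uint32_t aCount)
		{
		RandomSeed = RandomSeed * 69069u + 1u;            // LCG step, mod 2^32
		return uint32_t((uint64_t(RandomSeed) * aCount) >> 32);
		}

	int main()
		{
		for (int i = 0; i < 8; ++i)
			std::printf("%u ", NextIndex(4));             // indices in 0..3
		std::printf("\n");
		}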
  2850 
  3050 
  2851 
  3051 
  2852 /**
  3052 /**
  2853 Register the specified paging device with the kernel.
  3053 Register the specified paging device with the kernel.