41 /* On a 32 bit system without PAE can't have more than 2^(32-KPageShift) pages. |
41 /* On a 32 bit system without PAE can't have more than 2^(32-KPageShift) pages. |
42 * Subtract 1 so it doesn't overflow when converted to bytes. |
42 * Subtract 1 so it doesn't overflow when converted to bytes. |
43 */ |
43 */ |
44 const TUint KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u; |
44 const TUint KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u; |
45 |
45 |
46 /* |
46 /** |
47 Limit the maximum number of oldest pages to bound the time taken by SelectPagesToClean(), which is |
47 Default limit for the maximum number of oldest pages. |
48 called with the MmuLock held. |
48 |
|
49 If the data paging device sets iPreferredWriteShift, then this is increased if necessary to allow |
|
50 that many pages to be present. |
|
51 |
|
52 This limit exists to make our live list implementation a closer approximation to LRU, and to bound |
|
53 the time taken by SelectSequentialPagesToClean(), which is called with the MmuLock held. |
49 */ |
54 */ |
50 const TUint KMaxOldestPages = 32; |
55 const TUint KDefaultMaxOldestPages = 32; |
51 |
56 |
52 static DMutex* ThePageCleaningLock = NULL; |
57 static DMutex* ThePageCleaningLock = NULL; |
53 |
58 |
54 DPager ThePager; |
59 DPager ThePager; |
55 |
60 |
56 |
61 |
57 DPager::DPager() |
62 DPager::DPager() : |
58 : iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0), |
63 iMinimumPageCount(0), |
59 iYoungCount(0), iOldCount(0), iOldestCleanCount(0), |
64 iMaximumPageCount(0), |
60 iNumberOfFreePages(0), iReservePageCount(0), iMinimumPageLimit(0) |
65 iYoungOldRatio(0), |
|
66 iYoungCount(0), |
|
67 iOldCount(0), |
|
68 iOldestCleanCount(0), |
|
69 iMaxOldestPages(KDefaultMaxOldestPages), |
|
70 iNumberOfFreePages(0), |
|
71 iReservePageCount(0), |
|
72 iMinimumPageLimit(0), |
|
73 iPagesToClean(1) |
61 #ifdef __DEMAND_PAGING_BENCHMARKS__ |
74 #ifdef __DEMAND_PAGING_BENCHMARKS__ |
62 , iBenchmarkLock(TSpinLock::EOrderGenericIrqHigh3) |
75 , iBenchmarkLock(TSpinLock::EOrderGenericIrqHigh3) |
63 #endif |
76 #endif |
64 { |
77 { |
65 } |
78 } |
498 link = iOldestCleanList.Last(); |
515 link = iOldestCleanList.Last(); |
499 } |
516 } |
500 else if (iOldestDirtyCount) |
517 else if (iOldestDirtyCount) |
501 { |
518 { |
502 __NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty()); |
519 __NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty()); |
|
520 |
|
521 // see if we can clean multiple dirty pages in one go... |
|
522 if (iPagesToClean > 1 && iOldestDirtyCount > 1) |
|
523 { |
|
524 if (!PageCleaningLock::IsHeld()) |
|
525 { |
|
526 // temporarily release ram alloc mutex and acquire page cleaning mutex |
|
527 MmuLock::Unlock(); |
|
528 RamAllocLock::Unlock(); |
|
529 PageCleaningLock::Lock(); |
|
530 MmuLock::Lock(); |
|
531 pageCleaningLockAcquired = ETrue; |
|
532 } |
|
533 |
|
534 // there may be clean pages now if we've waited on the page cleaning mutex, if so don't |
|
535 // bother cleaning but just restart |
|
536 if (iOldestCleanCount == 0 && iOldestDirtyCount >= 1) |
|
537 CleanSomePages(EFalse); |
|
538 |
|
539 if (pageCleaningLockAcquired) |
|
540 { |
|
541 // release page cleaning mutex and re-aquire ram alloc mutex |
|
542 MmuLock::Unlock(); |
|
543 PageCleaningLock::Unlock(); |
|
544 RamAllocLock::Lock(); |
|
545 MmuLock::Lock(); |
|
546 } |
|
547 |
|
548 return 1; // tell caller to restart their operation |
|
549 } |
|
550 |
503 link = iOldestDirtyList.Last(); |
551 link = iOldestDirtyList.Last(); |
504 } |
552 } |
505 else if (iOldCount) |
553 else if (iOldCount) |
506 { |
554 { |
507 __NK_ASSERT_DEBUG(!iOldList.IsEmpty()); |
555 __NK_ASSERT_DEBUG(!iOldList.IsEmpty()); |
513 __NK_ASSERT_ALWAYS(!iYoungList.IsEmpty()); |
561 __NK_ASSERT_ALWAYS(!iYoungList.IsEmpty()); |
514 link = iYoungList.Last(); |
562 link = iYoungList.Last(); |
515 } |
563 } |
516 SPageInfo* pageInfo = SPageInfo::FromLink(link); |
564 SPageInfo* pageInfo = SPageInfo::FromLink(link); |
517 |
565 |
518 if (pageInfo->IsDirty() && !PageCleaningLock::IsHeld()) |
566 if (pageInfo->IsDirty()) |
519 return 1; |
567 { |
520 |
568 MmuLock::Unlock(); |
|
569 PageCleaningLock::Lock(); |
|
570 MmuLock::Lock(); |
|
571 pageCleaningLockAcquired = ETrue; |
|
572 } |
|
573 |
521 // try to steal it from owning object... |
574 // try to steal it from owning object... |
522 TInt r = StealPage(pageInfo); |
575 TInt r = StealPage(pageInfo); |
523 if (r == KErrNone) |
576 if (r == KErrNone) |
524 { |
577 { |
525 BalanceAges(); |
578 BalanceAges(); |
526 aPageInfoOut = pageInfo; |
579 aPageInfoOut = pageInfo; |
527 } |
580 } |
|
581 |
|
582 if (pageCleaningLockAcquired) |
|
583 { |
|
584 MmuLock::Unlock(); |
|
585 PageCleaningLock::Unlock(); |
|
586 MmuLock::Lock(); |
|
587 } |
528 |
588 |
529 return r; |
589 return r; |
530 } |
590 } |
531 |
591 |
532 |
592 |
533 SPageInfo* DPager::StealOldestPage() |
593 template <class T, TUint maxObjects> class TSequentialColourSelector |
534 { |
|
535 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
536 TBool pageCleaningLockHeld = EFalse; |
|
537 for(;;) |
|
538 { |
|
539 SPageInfo* pageInfo = NULL; |
|
540 TInt r = TryStealOldestPage(pageInfo); |
|
541 |
|
542 if (r == KErrNone) |
|
543 { |
|
544 if (pageCleaningLockHeld) |
|
545 { |
|
546 MmuLock::Unlock(); |
|
547 PageCleaningLock::Unlock(); |
|
548 MmuLock::Lock(); |
|
549 } |
|
550 return pageInfo; |
|
551 } |
|
552 else if (r == 1) |
|
553 { |
|
554 __NK_ASSERT_ALWAYS(!pageCleaningLockHeld); |
|
555 MmuLock::Unlock(); |
|
556 PageCleaningLock::Lock(); |
|
557 MmuLock::Lock(); |
|
558 pageCleaningLockHeld = ETrue; |
|
559 } |
|
560 // else retry... |
|
561 } |
|
562 } |
|
563 |
|
564 #ifdef __CPU_CACHE_HAS_COLOUR |
|
565 |
|
566 template <class T, TInt maxObjects> class TSequentialColourSelector |
|
567 { |
594 { |
568 public: |
595 public: |
569 static const TInt KMaxLength = maxObjects; |
596 static const TUint KMaxSearchLength = _ALIGN_UP(maxObjects, KPageColourCount); |
570 static const TInt KArrayLength = _ALIGN_UP(KMaxLength, KPageColourCount); |
|
571 |
597 |
572 FORCE_INLINE TSequentialColourSelector() |
598 FORCE_INLINE TSequentialColourSelector(TUint aTargetLength) |
573 { |
599 { |
574 memclr(this, sizeof(*this)); |
600 memclr(this, sizeof(*this)); |
|
601 __NK_ASSERT_DEBUG(aTargetLength <= maxObjects); |
|
602 iTargetLength = aTargetLength; |
|
603 iSearchLength = _ALIGN_UP(aTargetLength, KPageColourCount); |
575 } |
604 } |
576 |
605 |
577 FORCE_INLINE TBool FoundLongestSequence() |
606 FORCE_INLINE TBool FoundLongestSequence() |
578 { |
607 { |
579 return iLongestLength >= KMaxLength; |
608 return iLongestLength >= iTargetLength; |
580 } |
609 } |
581 |
610 |
582 FORCE_INLINE void AddCandidate(T* aObject, TInt aColour) |
611 FORCE_INLINE void AddCandidate(T* aObject, TUint aColour) |
583 { |
612 { |
584 // allocate objects to slots based on colour |
613 // allocate objects to slots based on colour |
585 for (TInt i = aColour ; i < KArrayLength ; i += KPageColourCount) |
614 for (TUint i = aColour ; i < iSearchLength ; i += KPageColourCount) |
586 { |
615 { |
587 if (!iSlot[i]) |
616 if (!iSlot[i]) |
588 { |
617 { |
589 iSlot[i] = aObject; |
618 iSlot[i] = aObject; |
590 iSeqLength[i] = i == 0 ? 1 : iSeqLength[i - 1] + 1; |
619 iSeqLength[i] = i == 0 ? 1 : iSeqLength[i - 1] + 1; |
591 TInt j = i + 1; |
620 TUint j = i + 1; |
592 while(j < KArrayLength && iSeqLength[j]) |
621 while(j < iSearchLength && iSeqLength[j]) |
593 iSeqLength[j++] += iSeqLength[i]; |
622 iSeqLength[j++] += iSeqLength[i]; |
594 TInt currentLength = iSeqLength[j - 1]; |
623 TUint currentLength = iSeqLength[j - 1]; |
595 if (currentLength > iLongestLength) |
624 if (currentLength > iLongestLength) |
596 { |
625 { |
597 iLongestLength = currentLength; |
626 iLongestLength = currentLength; |
598 iLongestStart = j - currentLength; |
627 iLongestStart = j - currentLength; |
599 } |
628 } |
600 break; |
629 break; |
601 } |
630 } |
602 } |
631 } |
603 } |
632 } |
604 |
633 |
605 FORCE_INLINE TInt FindLongestRun(T** aObjectsOut) |
634 FORCE_INLINE TUint FindLongestRun(T** aObjectsOut) |
606 { |
635 { |
607 if (iLongestLength == 0) |
636 if (iLongestLength == 0) |
608 return 0; |
637 return 0; |
609 |
638 |
610 if (iLongestLength < KMaxLength && iSlot[0] && iSlot[KArrayLength - 1]) |
639 if (iLongestLength < iTargetLength && iSlot[0] && iSlot[iSearchLength - 1]) |
611 { |
640 { |
612 // check possibility of wrapping |
641 // check possibility of wrapping |
613 |
642 |
614 TInt i = 1; |
643 TInt i = 1; |
615 while (iSlot[i]) ++i; // find first hole |
644 while (iSlot[i]) ++i; // find first hole |
616 TInt wrappedLength = iSeqLength[KArrayLength - 1] + iSeqLength[i - 1]; |
645 TUint wrappedLength = iSeqLength[iSearchLength - 1] + iSeqLength[i - 1]; |
617 if (wrappedLength > iLongestLength) |
646 if (wrappedLength > iLongestLength) |
618 { |
647 { |
619 iLongestLength = wrappedLength; |
648 iLongestLength = wrappedLength; |
620 iLongestStart = KArrayLength - iSeqLength[KArrayLength - 1]; |
649 iLongestStart = iSearchLength - iSeqLength[iSearchLength - 1]; |
621 } |
650 } |
622 } |
651 } |
623 |
652 |
624 iLongestLength = Min(iLongestLength, KMaxLength); |
653 iLongestLength = MinU(iLongestLength, iTargetLength); |
625 |
654 |
626 __NK_ASSERT_DEBUG(iLongestStart >= 0 && iLongestStart < KArrayLength); |
655 __NK_ASSERT_DEBUG(iLongestStart < iSearchLength); |
627 __NK_ASSERT_DEBUG(iLongestStart + iLongestLength < 2 * KArrayLength); |
656 __NK_ASSERT_DEBUG(iLongestStart + iLongestLength < 2 * iSearchLength); |
628 |
657 |
629 TInt len = Min(iLongestLength, KArrayLength - iLongestStart); |
658 TUint len = MinU(iLongestLength, iSearchLength - iLongestStart); |
630 wordmove(aObjectsOut, &iSlot[iLongestStart], len * sizeof(T*)); |
659 wordmove(aObjectsOut, &iSlot[iLongestStart], len * sizeof(T*)); |
631 wordmove(aObjectsOut + len, &iSlot[0], (iLongestLength - len) * sizeof(T*)); |
660 wordmove(aObjectsOut + len, &iSlot[0], (iLongestLength - len) * sizeof(T*)); |
632 |
661 |
633 return iLongestLength; |
662 return iLongestLength; |
634 } |
663 } |
635 |
664 |
636 private: |
665 private: |
637 T* iSlot[KArrayLength]; |
666 TUint iTargetLength; |
638 TInt8 iSeqLength[KArrayLength]; |
667 TUint iSearchLength; |
639 TInt iLongestStart; |
668 TUint iLongestStart; |
640 TInt iLongestLength; |
669 TUint iLongestLength; |
|
670 T* iSlot[KMaxSearchLength]; |
|
671 TUint8 iSeqLength[KMaxSearchLength]; |
641 }; |
672 }; |
642 |
673 |
643 TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut) |
674 |
644 { |
675 TInt DPager::SelectSequentialPagesToClean(SPageInfo** aPageInfosOut) |
645 // select up to KMaxPagesToClean oldest dirty pages with sequential page colours |
676 { |
|
677 // select up to iPagesToClean oldest dirty pages with sequential page colours |
646 |
678 |
647 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
679 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
648 |
680 |
649 TSequentialColourSelector<SPageInfo, KMaxPagesToClean> selector; |
681 TSequentialColourSelector<SPageInfo, KMaxPagesToClean> selector(iPagesToClean); |
650 |
682 |
651 SDblQueLink* link = iOldestDirtyList.Last(); |
683 SDblQueLink* link = iOldestDirtyList.Last(); |
652 while (link != &iOldestDirtyList.iA) |
684 while (link != &iOldestDirtyList.iA) |
653 { |
685 { |
654 SPageInfo* pi = SPageInfo::FromLink(link); |
686 SPageInfo* pi = SPageInfo::FromLink(link); |
689 link = link->iPrev; |
720 link = link->iPrev; |
690 } |
721 } |
691 return pageCount; |
722 return pageCount; |
692 } |
723 } |
693 |
724 |
694 #endif |
|
695 |
|
696 |
725 |
697 TInt DPager::CleanSomePages(TBool aBackground) |
726 TInt DPager::CleanSomePages(TBool aBackground) |
698 { |
727 { |
|
728 TRACE(("DPager::CleanSomePages")); |
|
729 |
699 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
730 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
700 __NK_ASSERT_DEBUG(PageCleaningLock::IsHeld()); |
731 __NK_ASSERT_DEBUG(PageCleaningLock::IsHeld()); |
701 // ram alloc lock may or may not be held |
732 // ram alloc lock may or may not be held |
702 |
733 |
703 SPageInfo* pageInfos[KMaxPagesToClean]; |
734 SPageInfo* pageInfos[KMaxPagesToClean]; |
704 TInt pageCount = SelectPagesToClean(&pageInfos[0]); |
735 TInt pageCount; |
|
736 if (iCleanInSequence) |
|
737 pageCount = SelectSequentialPagesToClean(&pageInfos[0]); |
|
738 else |
|
739 pageCount = SelectOldestPagesToClean(&pageInfos[0]); |
705 |
740 |
706 if (pageCount == 0) |
741 if (pageCount == 0) |
|
742 { |
|
743 TRACE2(("DPager::CleanSomePages no pages to clean", pageCount)); |
|
744 TRACE2((" page counts %d, %d, %d, %d", |
|
745 iYoungCount, iOldCount, iOldestCleanCount, iOldestDirtyCount)); |
707 return 0; |
746 return 0; |
|
747 } |
708 |
748 |
709 TheDataPagedMemoryManager->CleanPages(pageCount, pageInfos, aBackground); |
749 TheDataPagedMemoryManager->CleanPages(pageCount, pageInfos, aBackground); |
710 |
750 |
711 for (TInt i = 0 ; i < pageCount ; ++i) |
751 for (TInt i = 0 ; i < pageCount ; ++i) |
712 { |
752 { |
812 } |
852 } |
813 |
853 |
814 |
854 |
815 TInt DPager::DiscardAndAllocPage(SPageInfo* aPageInfo, TZonePageType aPageType) |
855 TInt DPager::DiscardAndAllocPage(SPageInfo* aPageInfo, TZonePageType aPageType) |
816 { |
856 { |
817 TInt r = DiscardPage(aPageInfo, KRamZoneInvalidId, EFalse); |
857 TInt r = DiscardPage(aPageInfo, KRamZoneInvalidId, M::EMoveDisMoveDirty); |
818 if (r == KErrNone) |
858 if (r == KErrNone) |
819 { |
859 { |
820 TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType); |
860 TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType); |
821 } |
861 } |
822 // Flash the ram alloc lock as we may have had to write a page out to swap. |
|
823 RamAllocLock::Unlock(); |
|
824 RamAllocLock::Lock(); |
|
825 return r; |
862 return r; |
826 } |
863 } |
827 |
864 |
828 |
865 |
829 static TBool DiscardCanStealPage(SPageInfo* aOldPageInfo, TBool aBlockRest) |
866 static TBool DiscardCanStealPage(SPageInfo* aOldPageInfo, TBool aMoveDirty) |
830 { |
867 { |
831 // If the page is pinned or if the page is dirty and a general defrag is being performed then |
868 // If the page is pinned or if the page is dirty and a general defrag is being performed then |
832 // don't attempt to steal it |
869 // don't attempt to steal it |
833 return aOldPageInfo->Type() == SPageInfo::EUnused || |
870 return aOldPageInfo->Type() == SPageInfo::EUnused || |
834 (aOldPageInfo->PagedState() != SPageInfo::EPagedPinned && (!aBlockRest || !aOldPageInfo->IsDirty())); |
871 (aOldPageInfo->PagedState() != SPageInfo::EPagedPinned && (!aMoveDirty || !aOldPageInfo->IsDirty())); |
835 } |
872 } |
836 |
873 |
837 |
874 |
838 TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest) |
875 TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TUint aMoveDisFlags) |
839 { |
876 { |
840 // todo: assert MmuLock not released |
877 // todo: assert MmuLock not released |
841 |
878 |
842 TRACE(("> DPager::DiscardPage %08x", aOldPageInfo)); |
879 TRACE(("> DPager::DiscardPage %08x", aOldPageInfo)); |
843 |
880 |
844 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
881 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
845 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
882 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
846 |
883 TBool moveDirty = (aMoveDisFlags & M::EMoveDisMoveDirty) != 0; |
847 if (!DiscardCanStealPage(aOldPageInfo, aBlockRest)) |
884 TBool blockRest = (aMoveDisFlags & M::EMoveDisBlockRest) != 0; |
|
885 |
|
886 if (!DiscardCanStealPage(aOldPageInfo, moveDirty)) |
848 { |
887 { |
849 // The page is pinned or is dirty and this is a general defrag so move the page. |
888 // The page is pinned or is dirty and this is a general defrag so move the page. |
850 DMemoryObject* memory = aOldPageInfo->Owner(); |
889 DMemoryObject* memory = aOldPageInfo->Owner(); |
851 // Page must be managed if it is pinned or dirty. |
890 // Page must be managed if it is pinned or dirty. |
852 __NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged); |
891 __NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged); |
853 __NK_ASSERT_DEBUG(memory); |
892 __NK_ASSERT_DEBUG(memory); |
854 MmuLock::Unlock(); |
893 MmuLock::Unlock(); |
855 TPhysAddr newAddr; |
894 TPhysAddr newAddr; |
856 TRACE2(("DPager::DiscardPage delegating pinned/dirty page to manager")); |
895 TRACE2(("DPager::DiscardPage delegating pinned/dirty page to manager")); |
857 TInt r = memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest); |
896 TInt r = memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, blockRest); |
858 TRACE(("< DPager::DiscardPage %d", r)); |
897 TRACE(("< DPager::DiscardPage %d", r)); |
859 return r; |
898 return r; |
860 } |
899 } |
861 |
900 |
862 TInt r = KErrNone; |
901 TInt r = KErrNone; |
1064 if(pageInfo) |
1116 if(pageInfo) |
1065 goto done; |
1117 goto done; |
1066 MmuLock::Lock(); |
1118 MmuLock::Lock(); |
1067 } |
1119 } |
1068 |
1120 |
1069 // try stealing a clean page... |
1121 // otherwise steal a page from the live list... |
1070 if (iOldestCleanCount) |
|
1071 goto try_steal_oldest_page; |
|
1072 |
|
1073 // see if we can clean multiple dirty pages in one go... |
|
1074 if (KMaxPagesToClean > 1 && iOldestDirtyCount > 1) |
|
1075 { |
|
1076 // if we don't hold the page cleaning mutex then temporarily release ram alloc mutex and |
|
1077 // acquire page cleaning mutex; if we hold it already just proceed |
|
1078 if (!pageCleaningLockHeld) |
|
1079 { |
|
1080 MmuLock::Unlock(); |
|
1081 RamAllocLock::Unlock(); |
|
1082 PageCleaningLock::Lock(); |
|
1083 MmuLock::Lock(); |
|
1084 } |
|
1085 |
|
1086 // there may be clean pages now if we've waited on the page cleaning mutex, if so don't |
|
1087 // bother cleaning but just restart |
|
1088 if (iOldestCleanCount == 0) |
|
1089 CleanSomePages(EFalse); |
|
1090 |
|
1091 if (!pageCleaningLockHeld) |
|
1092 { |
|
1093 MmuLock::Unlock(); |
|
1094 PageCleaningLock::Unlock(); |
|
1095 RamAllocLock::Lock(); |
|
1096 MmuLock::Lock(); |
|
1097 } |
|
1098 |
|
1099 if (iOldestCleanCount > 0) |
|
1100 goto find_a_page; |
|
1101 } |
|
1102 |
|
1103 // as a last resort, steal a page from the live list... |
|
1104 |
|
1105 try_steal_oldest_page: |
1122 try_steal_oldest_page: |
1106 __NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount); |
1123 __NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount); |
1107 r = TryStealOldestPage(pageInfo); |
1124 r = TryStealOldestPage(pageInfo); |
1108 // if this fails we restart whole process |
1125 |
1109 if (r < KErrNone) |
1126 // if this fails we restart whole process. |
|
1127 // failure can be either KErrInUse if the page was used while we were stealing, or 1 to indicate |
|
1128 // that some pages were cleaned and the operation should be restarted |
|
1129 if (r != KErrNone) |
1110 goto find_a_page; |
1130 goto find_a_page; |
1111 |
1131 |
1112 // if we need to clean, acquire page cleaning mutex for life of this function |
|
1113 if (r == 1) |
|
1114 { |
|
1115 __NK_ASSERT_ALWAYS(!pageCleaningLockHeld); |
|
1116 MmuLock::Unlock(); |
|
1117 PageCleaningLock::Lock(); |
|
1118 MmuLock::Lock(); |
|
1119 pageCleaningLockHeld = ETrue; |
|
1120 goto find_a_page; |
|
1121 } |
|
1122 |
|
1123 // otherwise we're done! |
1132 // otherwise we're done! |
1124 __NK_ASSERT_DEBUG(r == KErrNone); |
|
1125 MmuLock::Unlock(); |
1133 MmuLock::Unlock(); |
1126 |
1134 |
1127 // make page state same as a freshly allocated page... |
1135 // make page state same as a freshly allocated page... |
1128 pagePhys = pageInfo->PhysAddr(); |
1136 pagePhys = pageInfo->PhysAddr(); |
1129 TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags); |
1137 TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags); |
1130 |
1138 |
1131 done: |
1139 done: |
1132 if (pageCleaningLockHeld) |
|
1133 PageCleaningLock::Unlock(); |
|
1134 RamAllocLock::Unlock(); |
1140 RamAllocLock::Unlock(); |
1135 |
1141 |
1136 return pageInfo; |
1142 return pageInfo; |
1137 } |
1143 } |
1138 |
1144 |
1327 |
1333 |
1328 |
1334 |
1329 void DPager::BalanceAges() |
1335 void DPager::BalanceAges() |
1330 { |
1336 { |
1331 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
1337 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
1332 TBool restrictPage = EFalse; |
1338 TBool retry; |
1333 SPageInfo* pageInfo = NULL; |
1339 do |
1334 TUint oldestCount = iOldestCleanCount + iOldestDirtyCount; |
1340 { |
1335 if((iOldCount + oldestCount) * iYoungOldRatio < iYoungCount) |
1341 retry = EFalse; |
1336 { |
1342 TBool restrictPage = EFalse; |
1337 // Need more old pages so make one young page into an old page... |
1343 SPageInfo* pageInfo = NULL; |
1338 __NK_ASSERT_DEBUG(!iYoungList.IsEmpty()); |
1344 TUint oldestCount = iOldestCleanCount + iOldestDirtyCount; |
1339 __NK_ASSERT_DEBUG(iYoungCount); |
1345 if((iOldCount + oldestCount) * iYoungOldRatio < iYoungCount) |
1340 SDblQueLink* link = iYoungList.Last()->Deque(); |
1346 { |
1341 --iYoungCount; |
1347 // Need more old pages so make one young page into an old page... |
1342 |
1348 __NK_ASSERT_DEBUG(!iYoungList.IsEmpty()); |
1343 pageInfo = SPageInfo::FromLink(link); |
1349 __NK_ASSERT_DEBUG(iYoungCount); |
1344 pageInfo->SetPagedState(SPageInfo::EPagedOld); |
1350 SDblQueLink* link = iYoungList.Last()->Deque(); |
1345 |
1351 --iYoungCount; |
1346 iOldList.AddHead(link); |
1352 |
1347 ++iOldCount; |
1353 pageInfo = SPageInfo::FromLink(link); |
1348 |
1354 pageInfo->SetPagedState(SPageInfo::EPagedOld); |
1349 Event(EEventPageAged,pageInfo); |
1355 |
1350 // Delay restricting the page until it is safe to release the MmuLock. |
1356 iOldList.AddHead(link); |
1351 restrictPage = ETrue; |
1357 ++iOldCount; |
1352 } |
1358 |
1353 |
1359 Event(EEventPageAged,pageInfo); |
1354 // Check we have enough oldest pages. |
1360 // Delay restricting the page until it is safe to release the MmuLock. |
1355 if (oldestCount < KMaxOldestPages && |
1361 restrictPage = ETrue; |
1356 oldestCount * iOldOldestRatio < iOldCount) |
1362 } |
1357 { |
1363 |
1358 __NK_ASSERT_DEBUG(!iOldList.IsEmpty()); |
1364 // Check we have enough oldest pages. |
1359 __NK_ASSERT_DEBUG(iOldCount); |
1365 if (oldestCount < iMaxOldestPages && |
1360 SDblQueLink* link = iOldList.Last()->Deque(); |
1366 oldestCount * iOldOldestRatio < iOldCount) |
1361 --iOldCount; |
1367 { |
1362 |
1368 __NK_ASSERT_DEBUG(!iOldList.IsEmpty()); |
1363 SPageInfo* oldestPageInfo = SPageInfo::FromLink(link); |
1369 __NK_ASSERT_DEBUG(iOldCount); |
1364 if (oldestPageInfo->IsDirty()) |
1370 SDblQueLink* link = iOldList.Last()->Deque(); |
1365 { |
1371 --iOldCount; |
1366 oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestDirty); |
1372 |
1367 iOldestDirtyList.AddHead(link); |
1373 SPageInfo* oldestPageInfo = SPageInfo::FromLink(link); |
1368 ++iOldestDirtyCount; |
1374 if (oldestPageInfo->IsDirty()) |
1369 PageCleaner::NotifyPagesToClean(); |
1375 { |
1370 Event(EEventPageAgedDirty,oldestPageInfo); |
1376 oldestPageInfo->SetOldestPage(SPageInfo::EPagedOldestDirty); |
1371 } |
1377 iOldestDirtyList.AddHead(link); |
1372 else |
1378 ++iOldestDirtyCount; |
1373 { |
1379 PageCleaner::NotifyPagesToClean(); |
1374 oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestClean); |
1380 Event(EEventPageAgedDirty,oldestPageInfo); |
1375 iOldestCleanList.AddHead(link); |
1381 } |
1376 ++iOldestCleanCount; |
1382 else |
1377 Event(EEventPageAgedClean,oldestPageInfo); |
1383 { |
1378 } |
1384 oldestPageInfo->SetOldestPage(SPageInfo::EPagedOldestClean); |
1379 } |
1385 iOldestCleanList.AddHead(link); |
1380 |
1386 ++iOldestCleanCount; |
1381 if (restrictPage) |
1387 Event(EEventPageAgedClean,oldestPageInfo); |
1382 { |
1388 } |
1383 // Make the recently aged old page inaccessible. This is done last as it |
1389 } |
1384 // will release the MmuLock and therefore the page counts may otherwise change. |
1390 |
1385 RestrictPage(pageInfo,ERestrictPagesNoAccessForOldPage); |
1391 if (restrictPage) |
1386 } |
1392 { |
|
1393 // Make the recently aged old page inaccessible. This is done last as it will release |
|
1394 // the MmuLock and therefore the page counts may otherwise change. |
|
1395 TInt r = RestrictPage(pageInfo,ERestrictPagesNoAccessForOldPage); |
|
1396 |
|
1397 if (r == KErrInUse) |
|
1398 { |
|
1399 SPageInfo::TPagedState state = pageInfo->PagedState(); |
|
1400 if (state == SPageInfo::EPagedOld || |
|
1401 state == SPageInfo::EPagedOldestClean || |
|
1402 state == SPageInfo::EPagedOldestDirty) |
|
1403 { |
|
1404 // The restrict operation failed, but the page was left in an old state. This |
|
1405 // can happen when: |
|
1406 // |
|
1407 // - pages are in the process of being pinned - the mapping will veto the |
|
1408 // restriction |
|
1409 // - pages are rejuvenated and then become quickly become old again |
|
1410 // |
|
1411 // In the second instance the page will be needlessly rejuvenated because we |
|
1412 // can't tell that it has actually been restricted by another thread |
|
1413 RemovePage(pageInfo); |
|
1414 AddAsYoungestPage(pageInfo); |
|
1415 retry = ETrue; |
|
1416 } |
|
1417 } |
|
1418 } |
|
1419 } |
|
1420 while (retry); |
1387 } |
1421 } |
1388 |
1422 |
1389 |
1423 |
1390 void DPager::RemoveExcessPages() |
1424 void DPager::RemoveExcessPages() |
1391 { |
1425 { |
1392 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
1426 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
1393 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
1427 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
1394 while(HaveTooManyPages()) |
1428 while(HaveTooManyPages()) |
1395 ReturnPageToSystem(); |
1429 TryReturnOldestPageToSystem(); |
1396 } |
1430 } |
1397 |
1431 |
1398 |
1432 |
1399 void DPager::RejuvenatePageTable(TPte* aPt) |
1433 void DPager::RejuvenatePageTable(TPte* aPt) |
1400 { |
1434 { |
2024 // Increase iMaximumPageCount? |
2068 // Increase iMaximumPageCount? |
2025 if(aMaximumPageCount > iMaximumPageCount) |
2069 if(aMaximumPageCount > iMaximumPageCount) |
2026 iMaximumPageCount = aMaximumPageCount; |
2070 iMaximumPageCount = aMaximumPageCount; |
2027 |
2071 |
2028 // Reduce iMinimumPageCount? |
2072 // Reduce iMinimumPageCount? |
2029 TInt spare = iMinimumPageCount-aMinimumPageCount; |
2073 if(aMinimumPageCount < iMinimumPageCount) |
2030 if(spare>0) |
2074 { |
2031 { |
2075 iNumberOfFreePages += iMinimumPageCount - aMinimumPageCount; |
2032 iMinimumPageCount -= spare; |
2076 iMinimumPageCount = aMinimumPageCount; |
2033 iNumberOfFreePages += spare; |
|
2034 } |
2077 } |
2035 |
2078 |
2036 // Increase iMinimumPageCount? |
2079 // Increase iMinimumPageCount? |
2037 TInt r=KErrNone; |
2080 TInt r=KErrNone; |
2038 while(iMinimumPageCount<aMinimumPageCount) |
2081 while(aMinimumPageCount > iMinimumPageCount) |
2039 { |
2082 { |
2040 TUint newMin = aMinimumPageCount; |
2083 TUint newMin = MinU(aMinimumPageCount, iMinimumPageCount + iNumberOfFreePages); |
2041 TUint maxMin = iMinimumPageCount+iNumberOfFreePages; |
2084 |
2042 if(newMin>maxMin) |
2085 if (newMin == iMinimumPageCount) |
2043 newMin = maxMin; |
2086 { |
2044 |
2087 // have to add pages before we can increase minimum page count |
2045 TUint delta = newMin-iMinimumPageCount; |
2088 if(!TryGrowLiveList()) |
2046 if(delta) |
2089 { |
2047 { |
2090 r=KErrNoMemory; |
|
2091 break; |
|
2092 } |
|
2093 } |
|
2094 else |
|
2095 { |
|
2096 iNumberOfFreePages -= newMin - iMinimumPageCount; |
2048 iMinimumPageCount = newMin; |
2097 iMinimumPageCount = newMin; |
2049 iNumberOfFreePages -= delta; |
|
2050 continue; |
|
2051 } |
|
2052 |
|
2053 if(!TryGrowLiveList()) |
|
2054 { |
|
2055 r=KErrNoMemory; |
|
2056 break; |
|
2057 } |
2098 } |
2058 } |
2099 } |
2059 |
2100 |
2060 // Reduce iMaximumPageCount? |
2101 // Reduce iMaximumPageCount? |
2061 while(iMaximumPageCount>aMaximumPageCount) |
2102 while(aMaximumPageCount < iMaximumPageCount) |
2062 { |
2103 { |
2063 TUint newMax = aMaximumPageCount; |
2104 TUint newMax = MaxU(aMaximumPageCount, iMinimumPageCount + iNumberOfFreePages); |
2064 TUint minMax = iMinimumPageCount+iNumberOfFreePages; |
2105 |
2065 if(newMax<minMax) |
2106 if (newMax == iMaximumPageCount) |
2066 newMax = minMax; |
2107 { |
2067 |
2108 // have to remove pages before we can reduce maximum page count |
2068 TUint delta = iMaximumPageCount-newMax; |
2109 TryReturnOldestPageToSystem(); |
2069 if(delta) |
2110 } |
|
2111 else |
2070 { |
2112 { |
2071 iMaximumPageCount = newMax; |
2113 iMaximumPageCount = newMax; |
2072 continue; |
2114 } |
2073 } |
2115 } |
2074 |
2116 |
2075 ReturnPageToSystem(); |
2117 TRACE(("DPager::ResizeLiveList end: %d %d %d %d, %d %d %d", |
2076 } |
2118 iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount, |
2077 |
2119 iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount)); |
2078 TRACE(("DPager::ResizeLiveList end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount)); |
2120 |
|
2121 __NK_ASSERT_DEBUG((iMinimumPageCount + iNumberOfFreePages) <= iMaximumPageCount); |
2079 |
2122 |
2080 #ifdef BTRACE_KERNEL_MEMORY |
2123 #ifdef BTRACE_KERNEL_MEMORY |
2081 BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,iMinimumPageCount << KPageShift); |
2124 BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,iMinimumPageCount << KPageShift); |
2082 #endif |
2125 #endif |
2083 |
2126 |
2084 MmuLock::Unlock(); |
2127 MmuLock::Unlock(); |
2085 |
2128 |
|
2129 PageCleaningLock::Unlock(); |
2086 RamAllocLock::Unlock(); |
2130 RamAllocLock::Unlock(); |
2087 NKern::ThreadLeaveCS(); |
2131 NKern::ThreadLeaveCS(); |
2088 |
2132 |
2089 return r; |
2133 return r; |
|
2134 } |
|
2135 |
|
2136 |
|
2137 TUint RequiredOldestPages(TUint aPagesToClean, TBool aCleanInSequence) |
|
2138 { |
|
2139 return aCleanInSequence ? aPagesToClean * 8 : aPagesToClean; |
|
2140 } |
|
2141 |
|
2142 |
|
2143 void DPager::SetPagesToClean(TUint aPagesToClean) |
|
2144 { |
|
2145 TRACE(("WDP: Pager will attempt to clean %d pages", aPagesToClean)); |
|
2146 __NK_ASSERT_ALWAYS(aPagesToClean > 0 && aPagesToClean <= KMaxPagesToClean); |
|
2147 MmuLock::Lock(); |
|
2148 iPagesToClean = aPagesToClean; |
|
2149 iMaxOldestPages = MaxU(KDefaultMaxOldestPages, |
|
2150 RequiredOldestPages(iPagesToClean, iCleanInSequence)); |
|
2151 MmuLock::Unlock(); |
|
2152 TRACE(("WDP: Maximum %d oldest pages", iMaxOldestPages)); |
|
2153 } |
|
2154 |
|
2155 |
|
2156 TUint DPager::PagesToClean() |
|
2157 { |
|
2158 return iPagesToClean; |
|
2159 } |
|
2160 |
|
2161 |
|
2162 void DPager::SetCleanInSequence(TBool aCleanInSequence) |
|
2163 { |
|
2164 TRACE(("WDP: Sequential page colour set to %d", aCleanInSequence)); |
|
2165 MmuLock::Lock(); |
|
2166 iCleanInSequence = aCleanInSequence; |
|
2167 iMaxOldestPages = MaxU(KDefaultMaxOldestPages, |
|
2168 RequiredOldestPages(iPagesToClean, iCleanInSequence)); |
|
2169 MmuLock::Unlock(); |
|
2170 TRACE(("WDP: Maximum %d oldest pages", iMaxOldestPages)); |
2090 } |
2171 } |
2091 |
2172 |
2092 |
2173 |
2093 // WARNING THIS METHOD MAY HOLD THE RAM ALLOC LOCK FOR EXCESSIVE PERIODS. DON'T USE THIS IN ANY PRODUCTION CODE. |
2174 // WARNING THIS METHOD MAY HOLD THE RAM ALLOC LOCK FOR EXCESSIVE PERIODS. DON'T USE THIS IN ANY PRODUCTION CODE. |
2094 void DPager::FlushAll() |
2175 void DPager::FlushAll() |
2336 ThePager.ResetBenchmarkData((TPagingBenchmark)index); |
2417 ThePager.ResetBenchmarkData((TPagingBenchmark)index); |
2337 } |
2418 } |
2338 return KErrNone; |
2419 return KErrNone; |
2339 #endif |
2420 #endif |
2340 |
2421 |
|
2422 case EVMHalGetPhysicalAccessSupported: |
|
2423 if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0) |
|
2424 return KErrNotSupported; |
|
2425 return GetPhysicalAccessSupported(); |
|
2426 |
|
2427 case EVMHalGetUsePhysicalAccess: |
|
2428 if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0) |
|
2429 return KErrNotSupported; |
|
2430 return GetUsePhysicalAccess(); |
|
2431 |
|
2432 case EVMHalSetUsePhysicalAccess: |
|
2433 if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetUsePhysicalAccess)"))) |
|
2434 K::UnlockedPlatformSecurityPanic(); |
|
2435 if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0) |
|
2436 return KErrNotSupported; |
|
2437 if ((TUint)a1 > 1) |
|
2438 return KErrArgument; |
|
2439 SetUsePhysicalAccess((TBool)a1); |
|
2440 return KErrNone; |
|
2441 |
|
2442 case EVMHalGetPreferredDataWriteSize: |
|
2443 if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0) |
|
2444 return KErrNotSupported; |
|
2445 return GetPreferredDataWriteSize(); |
|
2446 |
|
2447 case EVMHalGetDataWriteSize: |
|
2448 if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0) |
|
2449 return KErrNotSupported; |
|
2450 return __e32_find_ms1_32(ThePager.PagesToClean()); |
|
2451 |
|
2452 case EVMHalSetDataWriteSize: |
|
2453 if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetDataWriteSize)"))) |
|
2454 K::UnlockedPlatformSecurityPanic(); |
|
2455 if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0) |
|
2456 return KErrNotSupported; |
|
2457 return SetDataWriteSize((TUint)a1); |
|
2458 |
2341 default: |
2459 default: |
2342 return KErrNotSupported; |
2460 return KErrNotSupported; |
2343 } |
2461 } |
2344 } |
2462 } |
2345 |
2463 |
2388 // |
2506 // |
2389 // Paging request management... |
2507 // Paging request management... |
2390 // |
2508 // |
2391 |
2509 |
2392 // |
2510 // |
2393 // DPagingRequest |
2511 // DPagingRequestBase |
2394 // |
2512 // |
2395 |
2513 |
// Construct an unused paging request: no mutex assigned yet (set later by the
// concrete request type) and an empty use region (iUseRegionCount == 0 means
// 'not in use').
DPagingRequest::DPagingRequest()
	: iMutex(NULL), iUseRegionCount(0)
	{
	}
|
2400 |
|
2401 |
|
2402 void DPagingRequest::SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
2403 { |
|
2404 __ASSERT_SYSTEM_LOCK; |
|
2405 __NK_ASSERT_DEBUG(iUseRegionCount == 0); |
|
2406 __NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages); |
|
2407 for (TUint i = 0 ; i < aCount ; ++i) |
|
2408 { |
|
2409 iUseRegionMemory[i] = aMemory; |
|
2410 iUseRegionIndex[i] = aIndex + i; |
|
2411 } |
|
2412 iUseRegionCount = aCount; |
|
2413 } |
|
2414 |
|
2415 |
|
2416 void DPagingRequest::SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount) |
|
2417 { |
|
2418 __ASSERT_SYSTEM_LOCK; |
|
2419 __NK_ASSERT_DEBUG(iUseRegionCount == 0); |
|
2420 __NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages); |
|
2421 for (TUint i = 0 ; i < aCount ; ++i) |
|
2422 { |
|
2423 iUseRegionMemory[i] = aMemory[i]; |
|
2424 iUseRegionIndex[i] = aIndex[i]; |
|
2425 } |
|
2426 iUseRegionCount = aCount; |
|
2427 } |
|
2428 |
|
2429 |
|
// Mark the request as no longer covering any region.  iUseRegionCount == 0 is
// the 'not in use' state; the stale array contents are ignored once it is zero.
void DPagingRequest::ResetUse()
	{
	__ASSERT_SYSTEM_LOCK;
	__NK_ASSERT_DEBUG(iUseRegionCount > 0);	// must currently be in use
	iUseRegionCount = 0;
	}
|
2436 |
|
2437 |
|
2438 TBool DPagingRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
2439 { |
|
2440 if (iUseRegionCount != aCount) |
|
2441 return EFalse; |
|
2442 for (TUint i = 0 ; i < iUseRegionCount ; ++i) |
|
2443 { |
|
2444 if (iUseRegionMemory[i] != aMemory || iUseRegionIndex[i] != aIndex + i) |
|
2445 return EFalse; |
|
2446 } |
|
2447 return ETrue; |
|
2448 } |
|
2449 |
|
2450 |
|
2451 TBool DPagingRequest::CheckUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount) |
|
2452 { |
|
2453 if (iUseRegionCount != aCount) |
|
2454 return EFalse; |
|
2455 for (TUint i = 0 ; i < iUseRegionCount ; ++i) |
|
2456 { |
|
2457 if (iUseRegionMemory[i] != aMemory[i] || iUseRegionIndex[i] != aIndex[i]) |
|
2458 return EFalse; |
|
2459 } |
|
2460 return ETrue; |
|
2461 } |
|
2462 |
|
2463 |
|
// Return ETrue if any page of this request's use region falls inside the
// contiguous range [aIndex, aIndex+aCount) of aMemory.
TBool DPagingRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	// note this could be optimised as most of the time we will be checking read/read collusions,
	// both of which will be contiguous
	__ASSERT_SYSTEM_LOCK;
	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
		{
		// Unsigned-subtraction range test: true iff
		// aIndex <= iUseRegionIndex[i] < aIndex + aCount (indices below aIndex
		// wrap to a huge TUint and fail the '< aCount' comparison).
		if (iUseRegionMemory[i] == aMemory &&
			TUint(iUseRegionIndex[i] - aIndex) < aCount)
			return ETrue;
		}
	return EFalse;
	}
|
2477 |
|
2478 |
|
// Temporarily map aCount physical pages (at the given page colour) so their
// contents can be accessed; returns the linear address of the mapping.
// The caller must hold this request's mutex.
TLinAddr DPagingRequest::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages)
	{
	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
	return iTempMapping.Map(aPages,aCount,aColour);
	}
2484 |
2520 |
2485 |
2521 |
// Remove the temporary mapping created by MapPages.  aIMBRequired presumably
// requests an instruction memory barrier for freshly written code pages —
// confirm against TTempMapping::Unmap.  Caller must hold the request mutex.
void DPagingRequest::UnmapPages(TBool aIMBRequired)
	{
	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
	iTempMapping.Unmap(aIMBRequired);
	}
2491 |
2527 |
2492 // |
|
2493 // DPoolPagingRequest |
|
2494 // |
|
2495 |
|
// Construct a pooled paging request belonging to the given pool group; the
// group manages the request's free/busy state.
DPoolPagingRequest::DPoolPagingRequest(DPagingRequestPool::TGroup& aPoolGroup) :
	iPoolGroup(aPoolGroup)
	{
	}
|
2500 |
|
2501 |
|
// Finish with this request: clear its use region and hand it back to the pool.
// NOTE(review): the system lock taken here has no visible matching unlock —
// presumably released inside Signal()/the pool group; confirm.
void DPoolPagingRequest::Release()
	{
	NKern::LockSystem();
	ResetUse();
	Signal();
	}
|
2508 |
|
2509 |
|
// Claim this request for the current thread, blocking on the request mutex if
// another thread currently owns it.
void DPoolPagingRequest::Wait()
	{
	__ASSERT_SYSTEM_LOCK;
	++iUsageCount;	// count users so Signal() knows when the request is free
	TInt r = iMutex->Wait();
	__NK_ASSERT_ALWAYS(r == KErrNone);
	}
|
2517 |
|
2518 |
|
// Release one use of this request via the owning pool group, which returns it
// to the free list once no threads are using it.
void DPoolPagingRequest::Signal()
	{
	__ASSERT_SYSTEM_LOCK;
	iPoolGroup.Signal(this);
	}
|
2524 |
2528 |
2525 // |
2529 // |
2526 // DPageReadRequest |
2530 // DPageReadRequest |
2527 // |
2531 // |
2528 |
2532 |
|
2533 |
2529 TInt DPageReadRequest::iAllocNext = 0; |
2534 TInt DPageReadRequest::iAllocNext = 0; |
2530 |
2535 |
|
2536 |
|
// Number of RAM pages to reserve for page-in requests: each request object
// allocated so far (iAllocNext counts them) may map up to EMaxPages at once.
TUint DPageReadRequest::ReservedPagesRequired()
	{
	return iAllocNext*EMaxPages;
	}
|
2541 |
|
2542 |
// Construct a page-in request attached to the given pool group.
DPageReadRequest::DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup) :
	iPoolGroup(aPoolGroup)
	{
	// allocate space for mapping pages whilst they're being loaded...
	iTempMapping.Alloc(EMaxPages);
	}
|
2548 |
2537 |
2549 |
2538 TInt DPageReadRequest::Construct() |
2550 TInt DPageReadRequest::Construct() |
2539 { |
2551 { |
2540 // allocate id and mutex... |
2552 // allocate id and mutex... |
2541 TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1); |
2553 TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1); |
2566 |
2578 |
2567 return r; |
2579 return r; |
2568 } |
2580 } |
2569 |
2581 |
2570 |
2582 |
|
// Finish with this request: clear the region being paged in and return the
// request to the pool.  NOTE(review): no matching UnlockSystem here —
// presumably the system lock is released by Signal(); confirm.
void DPageReadRequest::Release()
	{
	NKern::LockSystem();
	ResetUse();
	Signal();
	}
|
2589 |
|
2590 |
|
// Claim this request for the current thread, blocking on the request mutex if
// another thread currently owns it.
void DPageReadRequest::Wait()
	{
	__ASSERT_SYSTEM_LOCK;
	++iUsageCount;	// count users so Signal() knows when the request is free
	TInt r = iMutex->Wait();
	__NK_ASSERT_ALWAYS(r == KErrNone);
	}
|
2598 |
|
2599 |
|
// Release one use of this request; when the last user releases it, put the
// object back on the group's free list, then release the mutex.
void DPageReadRequest::Signal()
	{
	__ASSERT_SYSTEM_LOCK;
	__NK_ASSERT_DEBUG(iUsageCount > 0);
	if (--iUsageCount == 0)
		iPoolGroup.iFreeList.AddHead(&iLink);
	iMutex->Signal();
	}
|
2608 |
|
2609 |
|
// Record the contiguous region of aMemory this request is paging in.  A read
// request only ever covers one contiguous run, so just the memory object,
// start index and count are stored.
void DPageReadRequest::SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	__ASSERT_SYSTEM_LOCK;
	__NK_ASSERT_DEBUG(aMemory != NULL && aCount <= EMaxPages);
	__NK_ASSERT_DEBUG(iMemory == NULL);	// must not already be in use
	iMemory = aMemory;
	iIndex = aIndex;
	iCount = aCount;
	}
|
2619 |
|
2620 |
|
// Mark the request as no longer covering any region; iMemory == NULL is the
// 'not in use' state.
void DPageReadRequest::ResetUse()
	{
	__ASSERT_SYSTEM_LOCK;
	__NK_ASSERT_DEBUG(iMemory != NULL);	// must currently be in use
	iMemory = NULL;
	}
|
2627 |
|
2628 |
|
// Half-open interval overlap test: ETrue iff [aIndex, aIndex+aCount) overlaps
// [iIndex, iIndex+iCount) on the same memory object.
TBool DPageReadRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	__ASSERT_SYSTEM_LOCK;
	return iMemory == aMemory && aIndex < iIndex + iCount && aIndex + aCount > iIndex;
	}
|
2634 |
|
2635 |
|
2636 TBool DPageReadRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
2637 { |
|
2638 return iMemory == aMemory && iIndex == aIndex && iCount == aCount; |
|
2639 } |
|
2640 |
|
2641 |
2571 // |
2642 // |
2572 // DPageWriteRequest |
2643 // DPageWriteRequest |
2573 // |
2644 // |
2574 |
2645 |
2575 |
2646 |
// Construct the page-out request.  Write requests are serialised by the global
// page cleaning lock rather than a per-request mutex.
DPageWriteRequest::DPageWriteRequest()
	{
	iMutex = ThePageCleaningLock;
	// allocate space for mapping pages whilst they're being written out...
	iTempMapping.Alloc(EMaxPages);
	}
2582 |
2652 |
2583 |
2653 |
// Finish with this request: clear its use region under the system lock.
// Unlike pooled read requests there is no pool to signal here; NOTE(review):
// iMutex (ThePageCleaningLock) is not signalled in this function — presumably
// released by the caller; confirm.
void DPageWriteRequest::Release()
	{
	NKern::LockSystem();
	ResetUse();
	NKern::UnlockSystem();
	}
2590 |
2660 |
|
2661 |
|
2662 void DPageWriteRequest::SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount) |
|
2663 { |
|
2664 __ASSERT_SYSTEM_LOCK; |
|
2665 __NK_ASSERT_DEBUG(iUseRegionCount == 0); |
|
2666 __NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages); |
|
2667 for (TUint i = 0 ; i < aCount ; ++i) |
|
2668 { |
|
2669 iUseRegionMemory[i] = aMemory[i]; |
|
2670 iUseRegionIndex[i] = aIndex[i]; |
|
2671 } |
|
2672 iUseRegionCount = aCount; |
|
2673 } |
|
2674 |
|
2675 |
|
// Mark the request as no longer covering any pages; iUseRegionCount == 0 is
// the 'not in use' state.
void DPageWriteRequest::ResetUse()
	{
	__ASSERT_SYSTEM_LOCK;
	__NK_ASSERT_DEBUG(iUseRegionCount > 0);	// must currently be in use
	iUseRegionCount = 0;
	}
|
2682 |
|
2683 |
|
2684 TBool DPageWriteRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
2685 { |
|
2686 if (iUseRegionCount != aCount) |
|
2687 return EFalse; |
|
2688 for (TUint i = 0 ; i < iUseRegionCount ; ++i) |
|
2689 { |
|
2690 if (iUseRegionMemory[i] != aMemory || iUseRegionIndex[i] != aIndex + i) |
|
2691 return EFalse; |
|
2692 } |
|
2693 return ETrue; |
|
2694 } |
|
2695 |
|
2696 |
|
// Return ETrue if any page of this request's use region falls inside the
// contiguous range [aIndex, aIndex+aCount) of aMemory.
TBool DPageWriteRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	// note this could be optimised as most of the time we will be checking read/read collusions,
	// both of which will be contiguous
	__ASSERT_SYSTEM_LOCK;
	for (TUint i = 0 ; i < iUseRegionCount ; ++i)
		{
		// Unsigned-subtraction range test: true iff
		// aIndex <= iUseRegionIndex[i] < aIndex + aCount (indices below aIndex
		// wrap to a huge TUint and fail the '< aCount' comparison).
		if (iUseRegionMemory[i] == aMemory &&
			TUint(iUseRegionIndex[i] - aIndex) < aCount)
			return ETrue;
		}
	return EFalse;
	}
2591 |
2710 |
2592 // |
2711 // |
2593 // DPagingRequestPool |
2712 // DPagingRequestPool |
2594 // |
2713 // |
2595 |
2714 |
2684 |
2803 |
2685 |
2804 |
// Construct a request group: allocate the array of request pointers (the
// request objects themselves are created and added by the pool).
// Allocation failure is fatal (__NK_ASSERT_ALWAYS).
DPagingRequestPool::TGroup::TGroup(TUint aNumRequests)
	{
	iNumRequests = aNumRequests;
	iRequests = new DPageReadRequest*[aNumRequests];
	__NK_ASSERT_ALWAYS(iRequests);
	}
2692 |
2811 |
2693 |
2812 |
2694 DPoolPagingRequest* DPagingRequestPool::TGroup::FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2813 DPageReadRequest* DPagingRequestPool::TGroup::FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2695 { |
2814 { |
2696 __ASSERT_SYSTEM_LOCK; |
2815 __ASSERT_SYSTEM_LOCK; |
2697 DPoolPagingRequest** ptr = iRequests; |
2816 DPageReadRequest** ptr = iRequests; |
2698 DPoolPagingRequest** ptrEnd = ptr+iNumRequests; |
2817 DPageReadRequest** ptrEnd = ptr+iNumRequests; |
2699 while(ptr<ptrEnd) |
2818 while(ptr<ptrEnd) |
2700 { |
2819 { |
2701 DPoolPagingRequest* req = *ptr++; |
2820 DPageReadRequest* req = *ptr++; |
2702 if(req->IsCollisionContiguous(aMemory,aIndex,aCount)) |
2821 if(req->IsCollisionContiguous(aMemory,aIndex,aCount)) |
2703 return req; |
2822 return req; |
2704 } |
2823 } |
2705 return 0; |
2824 return 0; |
2706 } |
2825 } |
2707 |
2826 |
2708 |
2827 |
2709 static TUint32 RandomSeed = 33333; |
2828 static TUint32 RandomSeed = 33333; |
2710 |
2829 |
// Pick a request object to service paging of the given region and claim it
// (blocking in Wait() if it is busy).  A request already covering an
// overlapping region is reused, so a second faulting thread queues behind the
// in-progress I/O rather than starting a duplicate.
DPageReadRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	__NK_ASSERT_DEBUG(iNumRequests > 0);

	// try using an existing request which collides with this region...
	DPageReadRequest* req = FindCollisionContiguous(aMemory,aIndex,aCount);
	if(!req)
		{
		// use a free request...
		SDblQueLink* first = iFreeList.GetFirst();
		if(first)
			{
			// free requests aren't being used...
			req = _LOFF(first, DPageReadRequest, iLink);
			__NK_ASSERT_DEBUG(req->ThreadsWaiting() == 0);
			}
		else
			{
			// pick a random request...
			RandomSeed = RandomSeed*69069+1; // next 'random' number (LCG step)
			// Scale the 32-bit seed into [0, iNumRequests) via a 64-bit multiply.
			TUint index = (TUint64(RandomSeed) * TUint64(iNumRequests)) >> 32;
			req = iRequests[index];
			__NK_ASSERT_DEBUG(req->ThreadsWaiting() > 0); // we only pick random when none are free
			}
		}

	// wait for chosen request object...
	req->Wait();

	return req;
	}
2752 |
2861 |
2753 |
2862 |
2754 /** |
2863 /** |
2755 Register the specified paging device with the kernel. |
2864 Register the specified paging device with the kernel. |