25 #include "mmanager.h" |
25 #include "mmanager.h" |
26 #include "mptalloc.h" |
26 #include "mptalloc.h" |
27 #include "mpagearray.h" |
27 #include "mpagearray.h" |
28 #include "mswap.h" |
28 #include "mswap.h" |
29 #include "mthrash.h" |
29 #include "mthrash.h" |
|
30 #include "mpagecleaner.h" |
|
31 |
30 #include "cache_maintenance.inl" |
32 #include "cache_maintenance.inl" |
31 |
33 |
32 |
34 |
33 const TUint16 KDefaultYoungOldRatio = 3; |
35 const TUint16 KDefaultYoungOldRatio = 3; |
34 const TUint16 KDefaultMinPages = 256; |
36 const TUint16 KDefaultMinPages = 256; |
35 #ifdef _USE_OLDEST_LISTS |
|
36 const TUint16 KDefaultOldOldestRatio = 3; |
37 const TUint16 KDefaultOldOldestRatio = 3; |
37 #endif |
|
38 |
38 |
39 const TUint KMinOldPages = 1; |
39 const TUint KMinOldPages = 1; |
40 |
40 |
41 /* On a 32 bit system without PAE can't have more than 2^(32-KPageShift) pages. |
41 /* On a 32 bit system without PAE can't have more than 2^(32-KPageShift) pages. |
42 * Subtract 1 so it doesn't overflow when converted to bytes. |
42 * Subtract 1 so it doesn't overflow when converted to bytes. |
43 */ |
43 */ |
44 const TUint KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u; |
44 const TUint KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u; |
45 |
45 |
46 |
46 /* |
|
47 Limit the maximum number of oldest pages to bound the time taken by SelectPagesToClean(), which is |
|
48 called with the MmuLock held. |
|
49 */ |
|
50 const TUint KMaxOldestPages = 32; |
|
51 |
|
52 static DMutex* ThePageCleaningLock = NULL; |
47 |
53 |
48 DPager ThePager; |
54 DPager ThePager; |
49 |
55 |
50 |
56 |
51 DPager::DPager() |
57 DPager::DPager() |
52 : iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0), |
58 : iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0), |
53 iYoungCount(0),iOldCount(0),iNumberOfFreePages(0) |
59 iYoungCount(0), iOldCount(0), iOldestCleanCount(0), |
54 { |
60 iNumberOfFreePages(0), iReservePageCount(0), iMinimumPageLimit(0) |
55 } |
61 #ifdef __DEMAND_PAGING_BENCHMARKS__ |
56 |
62 , iBenchmarkLock(TSpinLock::EOrderGenericIrqHigh3) |
57 |
63 #endif |
58 void DPager::Init2() |
64 { |
59 { |
65 } |
60 TRACEB(("DPager::Init2()")); |
66 |
|
67 |
|
68 void DPager::InitCache() |
|
69 { |
|
70 // |
|
71 // This routine doesn't acquire any mutexes because it should be called before the system |
|
72 // is fully up and running. I.e. called before another thread can preempt this. |
|
73 // |
|
74 TRACEB(("DPager::InitCache()")); |
|
75 // If any pages have been reserved then they will have already been allocated and |
|
76 // therefore should be counted as part of iMinimumPageCount. |
|
77 __NK_ASSERT_DEBUG(iReservePageCount == iMinimumPageCount); |
|
78 __NK_ASSERT_DEBUG(!CacheInitialised()); |
61 |
79 |
62 #if defined(__CPU_ARM) |
80 #if defined(__CPU_ARM) |
63 |
81 |
64 /** Minimum number of young pages the demand paging live list may have. |
82 /** Minimum number of young pages the demand paging live list may have. |
65 Need at least 4 mapped pages to guarantee to be able to execute all ARM instructions, |
83 Need at least 4 mapped pages to guarantee to be able to execute all ARM instructions, |
88 #error Unknown CPU |
106 #error Unknown CPU |
89 #endif |
107 #endif |
90 |
108 |
91 #ifdef __SMP__ |
109 #ifdef __SMP__ |
92 // Adjust min page count so that all CPUs are guaranteed to make progress. |
110 // Adjust min page count so that all CPUs are guaranteed to make progress. |
93 // NOTE: Can't use NKern::NumberOfCpus here because we haven't finished booting yet and will |
111 TInt numberOfCpus = NKern::NumberOfCpus(); |
94 // always have only one CPU running at this point... |
112 iMinYoungPages *= numberOfCpus; |
95 |
|
96 // TODO: Before we can enable this the base test configuration needs |
|
97 // updating to have a sufficient minimum page size... |
|
98 // |
|
99 // iMinYoungPages *= KMaxCpus; |
|
100 #endif |
113 #endif |
101 |
114 |
102 // A minimum young/old ratio of 1 means that we need at least twice iMinYoungPages pages... |
115 // A minimum young/old ratio of 1 means that we need at least twice iMinYoungPages pages... |
103 iAbsoluteMinPageCount = 2*iMinYoungPages; |
116 iAbsoluteMinPageCount = 2*iMinYoungPages; |
104 |
117 |
105 __NK_ASSERT_DEBUG(KMinOldPages<=iAbsoluteMinPageCount/2); |
118 __NK_ASSERT_DEBUG(KMinOldPages<=iAbsoluteMinPageCount/2); |
106 |
119 |
107 // initialise live list... |
120 // Read any paging config data. |
108 TUint minimumPageCount = 0; |
|
109 TUint maximumPageCount = 0; |
|
110 |
|
111 SDemandPagingConfig config = TheRomHeader().iDemandPagingConfig; |
121 SDemandPagingConfig config = TheRomHeader().iDemandPagingConfig; |
112 |
122 |
113 iMinimumPageCount = KDefaultMinPages; |
123 // Set the list ratios... |
114 if(minimumPageCount) |
124 iYoungOldRatio = KDefaultYoungOldRatio; |
115 iMinimumPageCount = minimumPageCount; |
125 if(config.iYoungOldRatio) |
|
126 iYoungOldRatio = config.iYoungOldRatio; |
|
127 iOldOldestRatio = KDefaultOldOldestRatio; |
|
128 if(config.iSpare[2]) |
|
129 iOldOldestRatio = config.iSpare[2]; |
|
130 |
|
131 // Set the minimum page counts... |
|
132 iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio |
|
133 + DPageReadRequest::ReservedPagesRequired(); |
|
134 |
|
135 if(iMinimumPageLimit < iAbsoluteMinPageCount) |
|
136 iMinimumPageLimit = iAbsoluteMinPageCount; |
|
137 |
|
138 if (K::MemModelAttributes & (EMemModelAttrRomPaging | EMemModelAttrCodePaging | EMemModelAttrDataPaging)) |
|
139 iMinimumPageCount = KDefaultMinPages; |
|
140 else |
|
141 {// No paging is enabled so set the minimum cache size to the minimum |
|
142 // allowable with the current young old ratio. |
|
143 iMinimumPageCount = iMinYoungPages * (iYoungOldRatio + 1); |
|
144 } |
|
145 |
116 if(config.iMinPages) |
146 if(config.iMinPages) |
117 iMinimumPageCount = config.iMinPages; |
147 iMinimumPageCount = config.iMinPages; |
118 if(iMinimumPageCount<iAbsoluteMinPageCount) |
148 if(iMinimumPageCount < iAbsoluteMinPageCount) |
119 iMinimumPageCount = iAbsoluteMinPageCount; |
149 iMinimumPageCount = iAbsoluteMinPageCount; |
|
150 if (iMinimumPageLimit + iReservePageCount > iMinimumPageCount) |
|
151 iMinimumPageCount = iMinimumPageLimit + iReservePageCount; |
|
152 |
120 iInitMinimumPageCount = iMinimumPageCount; |
153 iInitMinimumPageCount = iMinimumPageCount; |
121 |
154 |
|
155 // Set the maximum page counts... |
122 iMaximumPageCount = KMaxTInt; |
156 iMaximumPageCount = KMaxTInt; |
123 if(maximumPageCount) |
|
124 iMaximumPageCount = maximumPageCount; |
|
125 if(config.iMaxPages) |
157 if(config.iMaxPages) |
126 iMaximumPageCount = config.iMaxPages; |
158 iMaximumPageCount = config.iMaxPages; |
127 if (iMaximumPageCount > KAbsoluteMaxPageCount) |
159 if (iMaximumPageCount > KAbsoluteMaxPageCount) |
128 iMaximumPageCount = KAbsoluteMaxPageCount; |
160 iMaximumPageCount = KAbsoluteMaxPageCount; |
129 iInitMaximumPageCount = iMaximumPageCount; |
161 iInitMaximumPageCount = iMaximumPageCount; |
130 |
162 |
131 iYoungOldRatio = KDefaultYoungOldRatio; |
163 TRACEB(("DPager::InitCache() live list min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio)); |
132 if(config.iYoungOldRatio) |
164 |
133 iYoungOldRatio = config.iYoungOldRatio; |
165 // Verify the page counts are valid. |
|
166 __NK_ASSERT_ALWAYS(iMaximumPageCount >= iMinimumPageCount); |
|
167 TUint minOldAndOldest = iMinimumPageCount / (1 + iYoungOldRatio); |
|
168 __NK_ASSERT_ALWAYS(minOldAndOldest >= KMinOldPages); |
|
169 __NK_ASSERT_ALWAYS(iMinimumPageCount >= minOldAndOldest); |
|
170 |
|
171 // Need at least iMinYoungPages pages mapped to execute worst case CPU instruction |
|
172 TUint minYoung = iMinimumPageCount - minOldAndOldest; |
|
173 __NK_ASSERT_ALWAYS(minYoung >= iMinYoungPages); |
|
174 |
|
175 // Verify that the young old ratio can be met even when there is only the |
|
176 // minimum number of old pages. |
134 TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages; |
177 TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages; |
135 if(iYoungOldRatio>ratioLimit) |
178 __NK_ASSERT_ALWAYS(iYoungOldRatio <= ratioLimit); |
136 iYoungOldRatio = ratioLimit; |
179 |
137 |
|
138 #ifdef _USE_OLDEST_LISTS |
|
139 iOldOldestRatio = KDefaultOldOldestRatio; |
|
140 if(config.iSpare[2]) |
|
141 iOldOldestRatio = config.iSpare[2]; |
|
142 #endif |
|
143 |
|
144 iMinimumPageLimit = (iMinYoungPages * (1 + iYoungOldRatio)) / iYoungOldRatio; |
|
145 if(iMinimumPageLimit<iAbsoluteMinPageCount) |
|
146 iMinimumPageLimit = iAbsoluteMinPageCount; |
|
147 |
|
148 TRACEB(("DPager::Init2() live list min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio)); |
|
149 |
|
150 if(iMaximumPageCount<iMinimumPageCount) |
|
151 __NK_ASSERT_ALWAYS(0); |
|
152 |
|
153 // |
|
154 // This routine doesn't acquire any mutexes because it should be called before the system |
|
155 // is fully up and running. I.e. called before another thread can preempt this. |
|
156 // |
|
157 |
|
158 // Calculate page counts |
|
159 TUint minOldAndOldest = iMinimumPageCount / (1 + iYoungOldRatio); |
|
160 if(minOldAndOldest < KMinOldPages) |
|
161 __NK_ASSERT_ALWAYS(0); |
|
162 if (iMinimumPageCount < minOldAndOldest) |
|
163 __NK_ASSERT_ALWAYS(0); |
|
164 TUint minYoung = iMinimumPageCount - minOldAndOldest; |
|
165 if(minYoung < iMinYoungPages) |
|
166 __NK_ASSERT_ALWAYS(0); // Need at least iMinYoungPages pages mapped to execute worst case CPU instruction |
|
167 #ifdef _USE_OLDEST_LISTS |
|
168 // There should always be enough old pages to allow the oldest lists ratio. |
180 // There should always be enough old pages to allow the oldest lists ratio. |
169 TUint oldestCount = minOldAndOldest / (1 + iOldOldestRatio); |
181 TUint oldestCount = minOldAndOldest / (1 + iOldOldestRatio); |
170 if (!oldestCount) |
182 __NK_ASSERT_ALWAYS(oldestCount); |
171 __NK_ASSERT_ALWAYS(0); |
183 |
172 #endif |
|
173 iNumberOfFreePages = 0; |
184 iNumberOfFreePages = 0; |
174 iNumberOfDirtyPages = 0; |
185 iNumberOfDirtyPages = 0; |
175 |
186 |
176 // Allocate RAM pages and put them all on the old list |
187 // Allocate RAM pages and put them all on the old list. |
|
188 // Reserved pages have already been allocated and already placed on the |
|
189 // old list so don't allocate them again. |
177 RamAllocLock::Lock(); |
190 RamAllocLock::Lock(); |
178 iYoungCount = 0; |
191 iYoungCount = 0; |
179 iOldCount = 0; |
192 iOldCount = 0; |
180 #ifdef _USE_OLDEST_LISTS |
|
181 iOldestCleanCount = 0; |
|
182 iOldestDirtyCount = 0; |
193 iOldestDirtyCount = 0; |
183 #endif |
194 __NK_ASSERT_DEBUG(iOldestCleanCount == iReservePageCount); |
184 Mmu& m = TheMmu; |
195 Mmu& m = TheMmu; |
185 for(TUint i=0; i<iMinimumPageCount; i++) |
196 for(TUint i = iReservePageCount; i < iMinimumPageCount; i++) |
186 { |
197 { |
187 // Allocate a single page |
198 // Allocate a single page |
188 TPhysAddr pagePhys; |
199 TPhysAddr pagePhys; |
189 TInt r = m.AllocRam(&pagePhys, 1, |
200 TInt r = m.AllocRam(&pagePhys, 1, |
190 (Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe|Mmu::EAllocNoPagerReclaim), |
201 (Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe|Mmu::EAllocNoPagerReclaim), |
480 break; |
475 break; |
481 } |
476 } |
482 } |
477 } |
483 |
478 |
484 |
479 |
485 SPageInfo* DPager::StealOldestPage() |
480 TInt DPager::TryStealOldestPage(SPageInfo*& aPageInfoOut) |
486 { |
481 { |
487 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
482 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
488 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
483 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
489 |
484 |
|
485 // find oldest page in list... |
|
486 SDblQueLink* link; |
|
487 if (iOldestCleanCount) |
|
488 { |
|
489 __NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty()); |
|
490 link = iOldestCleanList.Last(); |
|
491 } |
|
492 else if (iOldestDirtyCount) |
|
493 { |
|
494 __NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty()); |
|
495 link = iOldestDirtyList.Last(); |
|
496 } |
|
497 else if (iOldCount) |
|
498 { |
|
499 __NK_ASSERT_DEBUG(!iOldList.IsEmpty()); |
|
500 link = iOldList.Last(); |
|
501 } |
|
502 else |
|
503 { |
|
504 __NK_ASSERT_DEBUG(iYoungCount); |
|
505 __NK_ASSERT_ALWAYS(!iYoungList.IsEmpty()); |
|
506 link = iYoungList.Last(); |
|
507 } |
|
508 SPageInfo* pageInfo = SPageInfo::FromLink(link); |
|
509 |
|
510 if (pageInfo->IsDirty() && !PageCleaningLock::IsHeld()) |
|
511 return 1; |
|
512 |
|
513 // try to steal it from owning object... |
|
514 TInt r = StealPage(pageInfo); |
|
515 if (r == KErrNone) |
|
516 { |
|
517 BalanceAges(); |
|
518 aPageInfoOut = pageInfo; |
|
519 } |
|
520 |
|
521 return r; |
|
522 } |
|
523 |
|
524 |
|
525 SPageInfo* DPager::StealOldestPage() |
|
526 { |
|
527 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
528 TBool pageCleaningLockHeld = EFalse; |
490 for(;;) |
529 for(;;) |
491 { |
530 { |
492 // find oldest page in list... |
531 SPageInfo* pageInfo = NULL; |
493 SDblQueLink* link; |
532 TInt r = TryStealOldestPage(pageInfo); |
494 #ifdef _USE_OLDEST_LISTS |
533 |
495 if (iOldestCleanCount) |
534 if (r == KErrNone) |
496 { |
535 { |
497 __NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty()); |
536 if (pageCleaningLockHeld) |
498 link = iOldestCleanList.Last(); |
537 { |
499 } |
538 MmuLock::Unlock(); |
500 else if (iOldestDirtyCount) |
539 PageCleaningLock::Unlock(); |
501 { |
540 MmuLock::Lock(); |
502 __NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty()); |
541 } |
503 link = iOldestDirtyList.Last(); |
542 return pageInfo; |
504 } |
543 } |
505 else if (iOldCount) |
544 else if (r == 1) |
|
545 { |
|
546 __NK_ASSERT_ALWAYS(!pageCleaningLockHeld); |
|
547 MmuLock::Unlock(); |
|
548 PageCleaningLock::Lock(); |
|
549 MmuLock::Lock(); |
|
550 pageCleaningLockHeld = ETrue; |
|
551 } |
|
552 // else retry... |
|
553 } |
|
554 } |
|
555 |
|
556 #ifdef __CPU_CACHE_HAS_COLOUR |
|
557 |
|
558 template <class T, TInt maxObjects> class TSequentialColourSelector |
|
559 { |
|
560 public: |
|
561 static const TInt KMaxLength = maxObjects; |
|
562 static const TInt KArrayLength = _ALIGN_UP(KMaxLength, KPageColourCount); |
|
563 |
|
564 FORCE_INLINE TSequentialColourSelector() |
|
565 { |
|
566 memclr(this, sizeof(*this)); |
|
567 } |
|
568 |
|
569 FORCE_INLINE TBool FoundLongestSequence() |
|
570 { |
|
571 return iLongestLength >= KMaxLength; |
|
572 } |
|
573 |
|
574 FORCE_INLINE void AddCandidate(T* aObject, TInt aColour) |
|
575 { |
|
576 // allocate objects to slots based on colour |
|
577 for (TInt i = aColour ; i < KArrayLength ; i += KPageColourCount) |
|
578 { |
|
579 if (!iSlot[i]) |
|
580 { |
|
581 iSlot[i] = aObject; |
|
582 iSeqLength[i] = i == 0 ? 1 : iSeqLength[i - 1] + 1; |
|
583 TInt j = i + 1; |
|
584 while(j < KArrayLength && iSeqLength[j]) |
|
585 iSeqLength[j++] += iSeqLength[i]; |
|
586 TInt currentLength = iSeqLength[j - 1]; |
|
587 if (currentLength > iLongestLength) |
|
588 { |
|
589 iLongestLength = currentLength; |
|
590 iLongestStart = j - currentLength; |
|
591 } |
|
592 break; |
|
593 } |
|
594 } |
|
595 } |
|
596 |
|
597 FORCE_INLINE TInt FindLongestRun(T** aObjectsOut) |
|
598 { |
|
599 if (iLongestLength == 0) |
|
600 return 0; |
|
601 |
|
602 if (iLongestLength < KMaxLength && iSlot[0] && iSlot[KArrayLength - 1]) |
|
603 { |
|
604 // check possibility of wrapping |
|
605 |
|
606 TInt i = 1; |
|
607 while (iSlot[i]) ++i; // find first hole |
|
608 TInt wrappedLength = iSeqLength[KArrayLength - 1] + iSeqLength[i - 1]; |
|
609 if (wrappedLength > iLongestLength) |
|
610 { |
|
611 iLongestLength = wrappedLength; |
|
612 iLongestStart = KArrayLength - iSeqLength[KArrayLength - 1]; |
|
613 } |
|
614 } |
|
615 |
|
616 iLongestLength = Min(iLongestLength, KMaxLength); |
|
617 |
|
618 __NK_ASSERT_DEBUG(iLongestStart >= 0 && iLongestStart < KArrayLength); |
|
619 __NK_ASSERT_DEBUG(iLongestStart + iLongestLength < 2 * KArrayLength); |
|
620 |
|
621 TInt len = Min(iLongestLength, KArrayLength - iLongestStart); |
|
622 wordmove(aObjectsOut, &iSlot[iLongestStart], len * sizeof(T*)); |
|
623 wordmove(aObjectsOut + len, &iSlot[0], (iLongestLength - len) * sizeof(T*)); |
|
624 |
|
625 return iLongestLength; |
|
626 } |
|
627 |
|
628 private: |
|
629 T* iSlot[KArrayLength]; |
|
630 TInt8 iSeqLength[KArrayLength]; |
|
631 TInt iLongestStart; |
|
632 TInt iLongestLength; |
|
633 }; |
|
634 |
|
635 TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut) |
|
636 { |
|
637 // select up to KMaxPagesToClean oldest dirty pages with sequential page colours |
|
638 |
|
639 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
640 |
|
641 TSequentialColourSelector<SPageInfo, KMaxPagesToClean> selector; |
|
642 |
|
643 SDblQueLink* link = iOldestDirtyList.Last(); |
|
644 while (link != &iOldestDirtyList.iA) |
|
645 { |
|
646 SPageInfo* pi = SPageInfo::FromLink(link); |
|
647 if (!pi->IsWritable()) |
|
648 { |
|
649 // the page may be in the process of being restricted, stolen or decommitted, but don't |
|
650 // check for this as it will occur infrequently and will be detected by CheckModified |
|
651 // anyway |
|
652 TInt colour = pi->Index() & KPageColourMask; |
|
653 selector.AddCandidate(pi, colour); |
|
654 if (selector.FoundLongestSequence()) |
|
655 break; |
|
656 } |
|
657 link = link->iPrev; |
|
658 } |
|
659 |
|
660 return selector.FindLongestRun(aPageInfosOut); |
|
661 } |
|
662 |
506 #else |
663 #else |
507 if (iOldCount) |
664 |
|
665 TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut) |
|
666 { |
|
667 // no page colouring restrictions, so just take up to KMaxPagesToClean oldest dirty pages |
|
668 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
669 TInt pageCount = 0; |
|
670 SDblQueLink* link = iOldestDirtyList.Last(); |
|
671 while (link != &iOldestDirtyList.iA && pageCount < KMaxPagesToClean) |
|
672 { |
|
673 SPageInfo* pi = SPageInfo::FromLink(link); |
|
674 if (!pi->IsWritable()) |
|
675 { |
|
676 // the page may be in the process of being restricted, stolen or decommitted, but don't |
|
677 // check for this as it will occur infrequently and will be detected by CheckModified |
|
678 // anyway |
|
679 aPageInfosOut[pageCount++] = pi; |
|
680 } |
|
681 link = link->iPrev; |
|
682 } |
|
683 return pageCount; |
|
684 } |
|
685 |
508 #endif |
686 #endif |
509 { |
687 |
510 __NK_ASSERT_DEBUG(!iOldList.IsEmpty()); |
688 |
511 link = iOldList.Last(); |
689 TInt DPager::CleanSomePages(TBool aBackground) |
512 } |
690 { |
513 else |
691 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
514 { |
692 __NK_ASSERT_DEBUG(PageCleaningLock::IsHeld()); |
515 __NK_ASSERT_DEBUG(iYoungCount); |
693 // ram alloc lock may or may not be held |
516 __NK_ASSERT_ALWAYS(!iYoungList.IsEmpty()); |
694 |
517 link = iYoungList.Last(); |
695 SPageInfo* pageInfos[KMaxPagesToClean]; |
518 } |
696 TInt pageCount = SelectPagesToClean(&pageInfos[0]); |
519 SPageInfo* pageInfo = SPageInfo::FromLink(link); |
697 |
520 |
698 if (pageCount == 0) |
521 // steal it from owning object... |
699 return 0; |
522 TInt r = StealPage(pageInfo); |
700 |
523 |
701 TheDataPagedMemoryManager->CleanPages(pageCount, pageInfos, aBackground); |
524 BalanceAges(); |
702 |
525 |
703 for (TInt i = 0 ; i < pageCount ; ++i) |
526 if(r==KErrNone) |
704 { |
527 return pageInfo; // done |
705 SPageInfo* pi = pageInfos[i]; |
528 |
706 if (pi) |
529 // loop back and try again |
707 { |
530 } |
708 __NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EPagedOldestDirty && iOldestDirtyCount); |
|
709 __NK_ASSERT_DEBUG(!pi->IsDirty() && !pi->IsWritable()); |
|
710 |
|
711 pi->iLink.Deque(); |
|
712 iOldestCleanList.AddHead(&pi->iLink); |
|
713 --iOldestDirtyCount; |
|
714 ++iOldestCleanCount; |
|
715 pi->SetPagedState(SPageInfo::EPagedOldestClean); |
|
716 } |
|
717 } |
|
718 |
|
719 return pageCount; |
|
720 } |
|
721 |
|
722 |
|
723 TBool DPager::HasPagesToClean() |
|
724 { |
|
725 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
726 return iOldestDirtyCount > 0; |
531 } |
727 } |
532 |
728 |
533 |
729 |
534 TInt DPager::RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction) |
730 TInt DPager::RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction) |
535 { |
731 { |
606 TRACE(("DPager::StealPage returns %d",r)); |
802 TRACE(("DPager::StealPage returns %d",r)); |
607 return r; |
803 return r; |
608 } |
804 } |
609 |
805 |
610 |
806 |
|
807 static TBool DiscardCanStealPage(SPageInfo* aOldPageInfo, TBool aBlockRest) |
|
808 { |
|
809 // If the page is pinned or if the page is dirty and a general defrag is being performed then |
|
810 // don't attempt to steal it |
|
811 return aOldPageInfo->Type() == SPageInfo::EUnused || |
|
812 (aOldPageInfo->PagedState() != SPageInfo::EPagedPinned && (!aBlockRest || !aOldPageInfo->IsDirty())); |
|
813 } |
|
814 |
|
815 |
611 TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest) |
816 TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest) |
612 { |
817 { |
|
818 // todo: assert MmuLock not released |
|
819 |
|
820 TRACE(("> DPager::DiscardPage %08x", aOldPageInfo)); |
|
821 |
613 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
822 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
614 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
823 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
615 |
824 |
616 TInt r; |
825 if (!DiscardCanStealPage(aOldPageInfo, aBlockRest)) |
617 // If the page is pinned or if the page is dirty and a general defrag is being |
826 { |
618 // performed then don't attempt to steal it. |
827 // The page is pinned or is dirty and this is a general defrag so move the page. |
619 if (aOldPageInfo->Type() != SPageInfo::EUnused && |
|
620 (aOldPageInfo->PagedState() == SPageInfo::EPagedPinned || |
|
621 (aBlockRest && aOldPageInfo->IsDirty()))) |
|
622 {// The page is pinned or is dirty and this is a general defrag so move the page. |
|
623 DMemoryObject* memory = aOldPageInfo->Owner(); |
828 DMemoryObject* memory = aOldPageInfo->Owner(); |
624 // Page must be managed if it is pinned or dirty. |
829 // Page must be managed if it is pinned or dirty. |
625 __NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged); |
830 __NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged); |
626 __NK_ASSERT_DEBUG(memory); |
831 __NK_ASSERT_DEBUG(memory); |
627 MmuLock::Unlock(); |
832 MmuLock::Unlock(); |
628 TPhysAddr newAddr; |
833 TPhysAddr newAddr; |
629 return memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest); |
834 TRACE2(("DPager::DiscardPage delegating pinned/dirty page to manager")); |
630 } |
835 TInt r = memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest); |
631 |
836 TRACE(("< DPager::DiscardPage %d", r)); |
632 if (!iNumberOfFreePages) |
837 return r; |
633 { |
838 } |
634 // Allocate a new page for the live list as it has reached its minimum size. |
839 |
|
840 TInt r = KErrNone; |
|
841 SPageInfo* newPageInfo = NULL; |
|
842 TBool havePageCleaningLock = EFalse; |
|
843 |
|
844 TBool needNewPage; |
|
845 TBool needPageCleaningLock; |
|
846 while(needNewPage = (iNumberOfFreePages == 0 && newPageInfo == NULL), |
|
847 needPageCleaningLock = (aOldPageInfo->IsDirty() && !havePageCleaningLock), |
|
848 needNewPage || needPageCleaningLock) |
|
849 { |
635 MmuLock::Unlock(); |
850 MmuLock::Unlock(); |
636 SPageInfo* newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe), |
851 |
637 aBlockZoneId, aBlockRest); |
852 if (needNewPage) |
638 if (!newPageInfo) |
853 { |
639 return KErrNoMemory; |
854 // Allocate a new page for the live list as it has reached its minimum size. |
|
855 TUint flags = EMemAttNormalCached | Mmu::EAllocNoWipe; |
|
856 newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)flags, aBlockZoneId, aBlockRest); |
|
857 if (!newPageInfo) |
|
858 { |
|
859 TRACE(("< DPager::DiscardPage KErrNoMemory")); |
|
860 r = KErrNoMemory; |
|
861 MmuLock::Lock(); |
|
862 break; |
|
863 } |
|
864 } |
|
865 |
|
866 if (needPageCleaningLock) |
|
867 { |
|
868 // Acquire the page cleaning mutex so StealPage can clean it |
|
869 PageCleaningLock::Lock(); |
|
870 havePageCleaningLock = ETrue; |
|
871 } |
640 |
872 |
641 // Re-acquire the mmulock and re-check that the page is not pinned or dirty. |
873 // Re-acquire the mmulock and re-check that the page is not pinned or dirty. |
642 MmuLock::Lock(); |
874 MmuLock::Lock(); |
643 if (aOldPageInfo->Type() != SPageInfo::EUnused && |
875 if (!DiscardCanStealPage(aOldPageInfo, aBlockRest)) |
644 (aOldPageInfo->PagedState() == SPageInfo::EPagedPinned || |
876 { |
645 (aBlockRest && aOldPageInfo->IsDirty()))) |
877 // Page is now pinned or dirty so give up as it is in use. |
646 {// Page is now pinned or dirty so give up as it is inuse. |
878 r = KErrInUse; |
647 ReturnPageToSystem(*newPageInfo); |
879 break; |
648 MmuLock::Unlock(); |
880 } |
649 return KErrInUse; |
881 } |
650 } |
882 |
651 |
883 if (r == KErrNone) |
|
884 { |
652 // Attempt to steal the page |
885 // Attempt to steal the page |
653 r = StealPage(aOldPageInfo); |
886 r = StealPage(aOldPageInfo); // temporarily releases MmuLock if page is dirty |
654 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
887 } |
655 |
888 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
656 if (r == KErrCompletion) |
889 |
657 {// This was a page table that has been freed but added to the |
890 if (r == KErrCompletion) |
658 // live list as a free page. Remove from live list and continue. |
891 {// This was a page table that has been freed but added to the |
659 __NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty()); |
892 // live list as a free page. Remove from live list and continue. |
660 RemovePage(aOldPageInfo); |
893 __NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty()); |
661 r = KErrNone; |
894 RemovePage(aOldPageInfo); |
662 } |
895 r = KErrNone; |
663 |
896 } |
664 if (r == KErrNone) |
897 |
665 {// Add the new page to the live list as discarding the old page |
898 if (r == KErrNone && iNumberOfFreePages == 0) |
666 // will reduce the live list below the minimum. |
899 { |
|
900 if (newPageInfo) |
|
901 { |
|
902 // Add a new page to the live list if we have one as discarding the old page will reduce |
|
903 // the live list below the minimum. |
667 AddAsFreePage(newPageInfo); |
904 AddAsFreePage(newPageInfo); |
668 // We've successfully discarded the page so return it to the free pool. |
905 newPageInfo = NULL; |
669 ReturnPageToSystem(*aOldPageInfo); |
906 } |
670 BalanceAges(); |
907 else |
671 } |
908 { |
672 else |
909 // Otherwise the live list shrank when page was being cleaned so have to give up |
673 { |
910 AddAsFreePage(aOldPageInfo); |
674 // New page not required so just return it to the system. This is safe as |
911 BalanceAges(); // temporarily releases MmuLock |
675 // iNumberOfFreePages will have this page counted but as it is not on the live list |
912 r = KErrInUse; |
676 // noone else can touch it. |
913 } |
677 ReturnPageToSystem(*newPageInfo); |
914 } |
678 } |
915 |
679 } |
916 if (r == KErrNone) |
680 else |
917 { |
681 { |
918 // We've successfully discarded the page and ensured the live list is large enough, so |
682 // Attempt to steal the page |
919 // return it to the free pool. |
683 r = StealPage(aOldPageInfo); |
920 ReturnPageToSystem(*aOldPageInfo); // temporarily releases MmuLock |
684 |
921 BalanceAges(); // temporarily releases MmuLock |
685 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
922 } |
686 |
923 |
687 if (r == KErrCompletion) |
924 if (newPageInfo) |
688 {// This was a page table that has been freed but added to the |
925 { |
689 // live list as a free page. Remove from live list. |
926 // New page not required so just return it to the system. This is safe as |
690 __NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty()); |
927 // iNumberOfFreePages will have this page counted but as it is not on the live list noone |
691 RemovePage(aOldPageInfo); |
928 // else can touch it. |
692 r = KErrNone; |
929 if (iNumberOfFreePages == 0) |
693 } |
930 AddAsFreePage(newPageInfo); |
694 |
931 else |
695 if (r == KErrNone) |
932 ReturnPageToSystem(*newPageInfo); // temporarily releases MmuLock |
696 {// We've successfully discarded the page so return it to the free pool. |
933 } |
697 ReturnPageToSystem(*aOldPageInfo); |
934 |
698 BalanceAges(); |
935 if (havePageCleaningLock) |
699 } |
936 { |
700 } |
937 // Release the page cleaning mutex |
|
938 MmuLock::Unlock(); |
|
939 PageCleaningLock::Unlock(); |
|
940 MmuLock::Lock(); |
|
941 } |
|
942 |
701 MmuLock::Unlock(); |
943 MmuLock::Unlock(); |
|
944 TRACE(("< DPager::DiscardPage returns %d", r)); |
702 return r; |
945 return r; |
703 } |
946 } |
704 |
947 |
705 |
948 |
706 TBool DPager::TryGrowLiveList() |
949 TBool DPager::TryGrowLiveList() |
799 if(pageInfo) |
1039 if(pageInfo) |
800 goto done; |
1040 goto done; |
801 MmuLock::Lock(); |
1041 MmuLock::Lock(); |
802 } |
1042 } |
803 |
1043 |
|
1044 // try stealing a clean page... |
|
1045 if (iOldestCleanCount) |
|
1046 goto try_steal_oldest_page; |
|
1047 |
|
1048 // see if we can clean multiple dirty pages in one go... |
|
1049 if (KMaxPagesToClean > 1 && iOldestDirtyCount > 1) |
|
1050 { |
|
1051 // if we don't hold the page cleaning mutex then temporarily release ram alloc mutex and |
|
1052 // acquire page cleaning mutex; if we hold it already just proceed |
|
1053 if (!pageCleaningLockHeld) |
|
1054 { |
|
1055 MmuLock::Unlock(); |
|
1056 RamAllocLock::Unlock(); |
|
1057 PageCleaningLock::Lock(); |
|
1058 MmuLock::Lock(); |
|
1059 } |
|
1060 |
|
1061 // there may be clean pages now if we've waited on the page cleaning mutex, if so don't |
|
1062 // bother cleaning but just restart |
|
1063 if (iOldestCleanCount == 0) |
|
1064 CleanSomePages(EFalse); |
|
1065 |
|
1066 if (!pageCleaningLockHeld) |
|
1067 { |
|
1068 MmuLock::Unlock(); |
|
1069 PageCleaningLock::Unlock(); |
|
1070 RamAllocLock::Lock(); |
|
1071 MmuLock::Lock(); |
|
1072 } |
|
1073 |
|
1074 if (iOldestCleanCount > 0) |
|
1075 goto find_a_page; |
|
1076 } |
|
1077 |
804 // as a last resort, steal a page from the live list... |
1078 // as a last resort, steal a page from the live list... |
805 get_oldest: |
1079 |
806 #ifdef _USE_OLDEST_LISTS |
1080 try_steal_oldest_page: |
807 __NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount); |
1081 __NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount); |
808 #else |
1082 r = TryStealOldestPage(pageInfo); |
809 __NK_ASSERT_ALWAYS(iOldCount|iYoungCount); |
1083 // if this fails we restart whole process |
810 #endif |
1084 if (r < KErrNone) |
811 pageInfo = StealOldestPage(); |
1085 goto find_a_page; |
|
1086 |
|
1087 // if we need to clean, acquire page cleaning mutex for life of this function |
|
1088 if (r == 1) |
|
1089 { |
|
1090 __NK_ASSERT_ALWAYS(!pageCleaningLockHeld); |
|
1091 MmuLock::Unlock(); |
|
1092 PageCleaningLock::Lock(); |
|
1093 MmuLock::Lock(); |
|
1094 pageCleaningLockHeld = ETrue; |
|
1095 goto find_a_page; |
|
1096 } |
|
1097 |
|
1098 // otherwise we're done! |
|
1099 __NK_ASSERT_DEBUG(r == KErrNone); |
812 MmuLock::Unlock(); |
1100 MmuLock::Unlock(); |
813 |
1101 |
814 // make page state same as a freshly allocated page... |
1102 // make page state same as a freshly allocated page... |
815 pagePhys = pageInfo->PhysAddr(); |
1103 pagePhys = pageInfo->PhysAddr(); |
816 TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags); |
1104 TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags); |
817 |
1105 |
818 done: |
1106 done: |
|
1107 if (pageCleaningLockHeld) |
|
1108 PageCleaningLock::Unlock(); |
819 RamAllocLock::Unlock(); |
1109 RamAllocLock::Unlock(); |
|
1110 |
820 return pageInfo; |
1111 return pageInfo; |
821 } |
1112 } |
822 |
1113 |
823 |
1114 |
824 TBool DPager::GetFreePages(TInt aNumPages) |
1115 TBool DPager::GetFreePages(TInt aNumPages) |
2044 #ifdef __DEMAND_PAGING_BENCHMARKS__ |
2320 #ifdef __DEMAND_PAGING_BENCHMARKS__ |
2045 |
2321 |
2046 void DPager::ResetBenchmarkData(TPagingBenchmark aBm) |
2322 void DPager::ResetBenchmarkData(TPagingBenchmark aBm) |
2047 { |
2323 { |
2048 SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm]; |
2324 SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm]; |
|
2325 __SPIN_LOCK_IRQ(iBenchmarkLock); |
2049 info.iCount = 0; |
2326 info.iCount = 0; |
2050 info.iTotalTime = 0; |
2327 info.iTotalTime = 0; |
2051 info.iMaxTime = 0; |
2328 info.iMaxTime = 0; |
2052 info.iMinTime = KMaxTInt; |
2329 info.iMinTime = KMaxTInt; |
|
2330 __SPIN_UNLOCK_IRQ(iBenchmarkLock); |
2053 } |
2331 } |
2054 |
2332 |
2055 void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime) |
2333 void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime, TUint aCount) |
2056 { |
2334 { |
2057 SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm]; |
2335 SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm]; |
2058 ++info.iCount; |
|
2059 #if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP) |
2336 #if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP) |
2060 TInt64 elapsed = aEndTime - aStartTime; |
2337 TInt64 elapsed = aEndTime - aStartTime; |
2061 #else |
2338 #else |
2062 TInt64 elapsed = aStartTime - aEndTime; |
2339 TInt64 elapsed = aStartTime - aEndTime; |
2063 #endif |
2340 #endif |
|
2341 __SPIN_LOCK_IRQ(iBenchmarkLock); |
|
2342 info.iCount += aCount; |
2064 info.iTotalTime += elapsed; |
2343 info.iTotalTime += elapsed; |
2065 if (elapsed > info.iMaxTime) |
2344 if (elapsed > info.iMaxTime) |
2066 info.iMaxTime = elapsed; |
2345 info.iMaxTime = elapsed; |
2067 if (elapsed < info.iMinTime) |
2346 if (elapsed < info.iMinTime) |
2068 info.iMinTime = elapsed; |
2347 info.iMinTime = elapsed; |
|
2348 __SPIN_UNLOCK_IRQ(iBenchmarkLock); |
2069 } |
2349 } |
|
2350 |
|
2351 void DPager::ReadBenchmarkData(TPagingBenchmark aBm, SPagingBenchmarkInfo& aDataOut) |
|
2352 { |
|
2353 __SPIN_LOCK_IRQ(iBenchmarkLock); |
|
2354 aDataOut = iBenchmarkInfo[aBm]; |
|
2355 __SPIN_UNLOCK_IRQ(iBenchmarkLock); |
|
2356 } |
2070 |
2357 |
2071 #endif //__DEMAND_PAGING_BENCHMARKS__ |
2358 #endif //__DEMAND_PAGING_BENCHMARKS__ |
2072 |
2359 |
2073 |
2360 |
2074 // |
2361 // |
2077 |
2364 |
2078 // |
2365 // |
2079 // DPagingRequest |
2366 // DPagingRequest |
2080 // |
2367 // |
2081 |
2368 |
2082 DPagingRequest::DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup) |
2369 DPagingRequest::DPagingRequest() |
2083 : iPoolGroup(aPoolGroup), iUseRegionMemory(0), iUseRegionIndex(0), iUseRegionCount(0) |
2370 : iMutex(NULL), iUseRegionCount(0) |
2084 { |
2371 { |
2085 } |
2372 } |
2086 |
2373 |
2087 |
2374 |
2088 FORCE_INLINE void DPagingRequest::SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2375 void DPagingRequest::SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2089 { |
2376 { |
2090 __ASSERT_SYSTEM_LOCK; |
2377 __ASSERT_SYSTEM_LOCK; |
2091 iUseRegionMemory = aMemory; |
2378 __NK_ASSERT_DEBUG(iUseRegionCount == 0); |
2092 iUseRegionIndex = aIndex; |
2379 __NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages); |
|
2380 for (TUint i = 0 ; i < aCount ; ++i) |
|
2381 { |
|
2382 iUseRegionMemory[i] = aMemory; |
|
2383 iUseRegionIndex[i] = aIndex + i; |
|
2384 } |
2093 iUseRegionCount = aCount; |
2385 iUseRegionCount = aCount; |
2094 } |
2386 } |
2095 |
2387 |
2096 |
2388 |
2097 TBool DPagingRequest::CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2389 void DPagingRequest::SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount) |
2098 { |
2390 { |
2099 return aMemory==iUseRegionMemory |
2391 __ASSERT_SYSTEM_LOCK; |
2100 && TUint(aIndex-iUseRegionIndex) < iUseRegionCount |
2392 __NK_ASSERT_DEBUG(iUseRegionCount == 0); |
2101 && TUint(iUseRegionCount-TUint(aIndex-iUseRegionIndex)) <= aCount; |
2393 __NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages); |
2102 } |
2394 for (TUint i = 0 ; i < aCount ; ++i) |
2103 |
2395 { |
2104 |
2396 iUseRegionMemory[i] = aMemory[i]; |
2105 void DPagingRequest::Release() |
2397 iUseRegionIndex[i] = aIndex[i]; |
|
2398 } |
|
2399 iUseRegionCount = aCount; |
|
2400 } |
|
2401 |
|
2402 |
|
2403 void DPagingRequest::ResetUse() |
|
2404 { |
|
2405 __ASSERT_SYSTEM_LOCK; |
|
2406 __NK_ASSERT_DEBUG(iUseRegionCount > 0); |
|
2407 iUseRegionCount = 0; |
|
2408 } |
|
2409 |
|
2410 |
|
2411 TBool DPagingRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
2412 { |
|
2413 if (iUseRegionCount != aCount) |
|
2414 return EFalse; |
|
2415 for (TUint i = 0 ; i < iUseRegionCount ; ++i) |
|
2416 { |
|
2417 if (iUseRegionMemory[i] != aMemory || iUseRegionIndex[i] != aIndex + i) |
|
2418 return EFalse; |
|
2419 } |
|
2420 return ETrue; |
|
2421 } |
|
2422 |
|
2423 |
|
2424 TBool DPagingRequest::CheckUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount) |
|
2425 { |
|
2426 if (iUseRegionCount != aCount) |
|
2427 return EFalse; |
|
2428 for (TUint i = 0 ; i < iUseRegionCount ; ++i) |
|
2429 { |
|
2430 if (iUseRegionMemory[i] != aMemory[i] || iUseRegionIndex[i] != aIndex[i]) |
|
2431 return EFalse; |
|
2432 } |
|
2433 return ETrue; |
|
2434 } |
|
2435 |
|
2436 |
|
2437 TBool DPagingRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
2438 { |
|
2439 // note this could be optimised as most of the time we will be checking read/read collisions, |
|
2440 // both of which will be contiguous |
|
2441 __ASSERT_SYSTEM_LOCK; |
|
2442 for (TUint i = 0 ; i < iUseRegionCount ; ++i) |
|
2443 { |
|
2444 if (iUseRegionMemory[i] == aMemory && |
|
2445 TUint(iUseRegionIndex[i] - aIndex) < aCount) |
|
2446 return ETrue; |
|
2447 } |
|
2448 return EFalse; |
|
2449 } |
|
2450 |
|
2451 |
|
2452 TLinAddr DPagingRequest::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages) |
|
2453 { |
|
2454 __NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread()); |
|
2455 return iTempMapping.Map(aPages,aCount,aColour); |
|
2456 } |
|
2457 |
|
2458 |
|
2459 void DPagingRequest::UnmapPages(TBool aIMBRequired) |
|
2460 { |
|
2461 __NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread()); |
|
2462 iTempMapping.Unmap(aIMBRequired); |
|
2463 } |
|
2464 |
|
2465 // |
|
2466 // DPoolPagingRequest |
|
2467 // |
|
2468 |
|
2469 DPoolPagingRequest::DPoolPagingRequest(DPagingRequestPool::TGroup& aPoolGroup) : |
|
2470 iPoolGroup(aPoolGroup) |
|
2471 { |
|
2472 } |
|
2473 |
|
2474 |
|
2475 void DPoolPagingRequest::Release() |
2106 { |
2476 { |
2107 NKern::LockSystem(); |
2477 NKern::LockSystem(); |
2108 SetUse(0,0,0); |
2478 ResetUse(); |
2109 Signal(); |
2479 Signal(); |
2110 } |
2480 } |
2111 |
2481 |
2112 |
2482 |
2113 void DPagingRequest::Wait() |
2483 void DPoolPagingRequest::Wait() |
2114 { |
2484 { |
2115 __ASSERT_SYSTEM_LOCK; |
2485 __ASSERT_SYSTEM_LOCK; |
2116 ++iUsageCount; |
2486 ++iUsageCount; |
2117 TInt r = iMutex->Wait(); |
2487 TInt r = iMutex->Wait(); |
2118 __NK_ASSERT_ALWAYS(r == KErrNone); |
2488 __NK_ASSERT_ALWAYS(r == KErrNone); |
2119 } |
2489 } |
2120 |
2490 |
2121 |
2491 |
2122 void DPagingRequest::Signal() |
2492 void DPoolPagingRequest::Signal() |
2123 { |
2493 { |
2124 __ASSERT_SYSTEM_LOCK; |
2494 __ASSERT_SYSTEM_LOCK; |
2125 iPoolGroup.Signal(this); |
2495 iPoolGroup.Signal(this); |
2126 } |
2496 } |
2127 |
|
2128 |
|
2129 FORCE_INLINE TBool DPagingRequest::IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
2130 { |
|
2131 __ASSERT_SYSTEM_LOCK; |
|
2132 DMemoryObject* memory = iUseRegionMemory; |
|
2133 TUint index = iUseRegionIndex; |
|
2134 TUint count = iUseRegionCount; |
|
2135 // note, this comparison would fail if either region includes page number KMaxTUint, |
|
2136 // but it isn't possible to create a memory object which is > KMaxTUint pages... |
|
2137 return memory == aMemory && index+count > aIndex && index < aIndex+aCount; |
|
2138 } |
|
2139 |
|
2140 |
|
2141 TLinAddr DPagingRequest::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages) |
|
2142 { |
|
2143 __NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread()); |
|
2144 return iTempMapping.Map(aPages,aCount,aColour); |
|
2145 } |
|
2146 |
|
2147 |
|
2148 void DPagingRequest::UnmapPages(TBool aIMBRequired) |
|
2149 { |
|
2150 __NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread()); |
|
2151 iTempMapping.Unmap(aIMBRequired); |
|
2152 } |
|
2153 |
|
2154 |
2497 |
2155 // |
2498 // |
2156 // DPageReadRequest |
2499 // DPageReadRequest |
2157 // |
2500 // |
2158 |
2501 |
2159 TInt DPageReadRequest::iAllocNext = 0; |
2502 TInt DPageReadRequest::iAllocNext = 0; |
|
2503 |
|
2504 DPageReadRequest::DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup) : |
|
2505 DPoolPagingRequest(aPoolGroup) |
|
2506 { |
|
2507 // allocate space for mapping pages whilst they're being loaded... |
|
2508 iTempMapping.Alloc(EMaxPages); |
|
2509 } |
2160 |
2510 |
2161 TInt DPageReadRequest::Construct() |
2511 TInt DPageReadRequest::Construct() |
2162 { |
2512 { |
2163 // allocate id and mutex... |
2513 // allocate id and mutex... |
2164 TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1); |
2514 TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1); |
2188 r = MM::MappingNew(bufferMapping,bufferMemory,ESupervisorReadWrite,KKernelOsAsid); |
2535 r = MM::MappingNew(bufferMapping,bufferMemory,ESupervisorReadWrite,KKernelOsAsid); |
2189 if(r!=KErrNone) |
2536 if(r!=KErrNone) |
2190 return r; |
2537 return r; |
2191 iBuffer = MM::MappingBase(bufferMapping); |
2538 iBuffer = MM::MappingBase(bufferMapping); |
2192 |
2539 |
2193 // ensure there are enough young pages to cope with new request object... |
|
2194 r = ThePager.ResizeLiveList(); |
|
2195 if(r!=KErrNone) |
|
2196 return r; |
|
2197 |
|
2198 return r; |
2540 return r; |
2199 } |
2541 } |
2200 |
2542 |
2201 |
2543 |
2202 // |
2544 // |
2203 // DPageWriteRequest |
2545 // DPageWriteRequest |
2204 // |
2546 // |
2205 |
2547 |
2206 TInt DPageWriteRequest::iAllocNext = 0; |
2548 |
2207 |
2549 DPageWriteRequest::DPageWriteRequest() |
2208 TInt DPageWriteRequest::Construct() |
2550 { |
2209 { |
2551 iMutex = ThePageCleaningLock; |
2210 // allocate id and mutex... |
|
2211 TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1); |
|
2212 _LIT(KLitPagingRequest,"PageWriteRequest-"); |
|
2213 TBuf<sizeof("PageWriteRequest-")+10> mutexName(KLitPagingRequest); |
|
2214 mutexName.AppendNum(id); |
|
2215 TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageOut); |
|
2216 if(r!=KErrNone) |
|
2217 return r; |
|
2218 |
|
2219 // allocate space for mapping pages whilst they're being loaded... |
2552 // allocate space for mapping pages whilst they're being written out... |
2220 iTempMapping.Alloc(EMaxPages); |
2553 iTempMapping.Alloc(KMaxPagesToClean); |
2221 |
2554 } |
2222 return r; |
2555 |
|
2556 |
|
2557 void DPageWriteRequest::Release() |
|
2558 { |
|
2559 NKern::LockSystem(); |
|
2560 ResetUse(); |
|
2561 NKern::UnlockSystem(); |
2223 } |
2562 } |
2224 |
2563 |
2225 |
2564 |
2226 // |
2565 // |
2227 // DPagingRequestPool |
2566 // DPagingRequestPool |
2228 // |
2567 // |
2229 |
2568 |
2230 DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest) |
2569 DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest, TBool aWriteRequest) |
2231 : iPageReadRequests(aNumPageReadRequest), iPageWriteRequests(aNumPageWriteRequest) |
2570 : iPageReadRequests(aNumPageReadRequest) |
2232 { |
2571 { |
2233 TUint i; |
2572 TUint i; |
2234 |
|
2235 for(i=0; i<aNumPageReadRequest; ++i) |
2573 for(i=0; i<aNumPageReadRequest; ++i) |
2236 { |
2574 { |
2237 DPageReadRequest* req = new DPageReadRequest(iPageReadRequests); |
2575 DPageReadRequest* req = new DPageReadRequest(iPageReadRequests); |
2238 __NK_ASSERT_ALWAYS(req); |
2576 __NK_ASSERT_ALWAYS(req); |
2239 TInt r = req->Construct(); |
2577 TInt r = req->Construct(); |
2240 __NK_ASSERT_ALWAYS(r==KErrNone); |
2578 __NK_ASSERT_ALWAYS(r==KErrNone); |
2241 iPageReadRequests.iRequests[i] = req; |
2579 iPageReadRequests.iRequests[i] = req; |
2242 iPageReadRequests.iFreeList.Add(req); |
2580 iPageReadRequests.iFreeList.Add(req); |
2243 } |
2581 } |
2244 |
2582 |
2245 for(i=0; i<aNumPageWriteRequest; ++i) |
2583 if (aWriteRequest) |
2246 { |
2584 { |
2247 DPageWriteRequest* req = new DPageWriteRequest(iPageWriteRequests); |
2585 iPageWriteRequest = new DPageWriteRequest(); |
2248 __NK_ASSERT_ALWAYS(req); |
2586 __NK_ASSERT_ALWAYS(iPageWriteRequest); |
2249 TInt r = req->Construct(); |
|
2250 __NK_ASSERT_ALWAYS(r==KErrNone); |
|
2251 iPageWriteRequests.iRequests[i] = req; |
|
2252 iPageWriteRequests.iFreeList.Add(req); |
|
2253 } |
2587 } |
2254 } |
2588 } |
2255 |
2589 |
2256 |
2590 |
2257 DPagingRequestPool::~DPagingRequestPool() |
2591 DPagingRequestPool::~DPagingRequestPool() |
2262 |
2596 |
2263 DPageReadRequest* DPagingRequestPool::AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2597 DPageReadRequest* DPagingRequestPool::AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2264 { |
2598 { |
2265 NKern::LockSystem(); |
2599 NKern::LockSystem(); |
2266 |
2600 |
2267 DPagingRequest* req; |
2601 DPoolPagingRequest* req; |
2268 |
2602 |
2269 // if we collide with page write operation... |
2603 // check for collision with existing write |
2270 req = iPageWriteRequests.FindCollision(aMemory,aIndex,aCount); |
2604 if(iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount)) |
2271 if(req) |
2605 { |
2272 { |
2606 NKern::UnlockSystem(); |
2273 // wait until write completes... |
2607 PageCleaningLock::Lock(); |
2274 req->Wait(); |
2608 PageCleaningLock::Unlock(); |
2275 req->Signal(); |
|
2276 return 0; // caller expected to retry if needed |
2609 return 0; // caller expected to retry if needed |
2277 } |
2610 } |
2278 |
2611 |
2279 // get a request object to use... |
2612 // get a request object to use... |
2280 req = iPageReadRequests.GetRequest(aMemory,aIndex,aCount); |
2613 req = iPageReadRequests.GetRequest(aMemory,aIndex,aCount); |
2281 |
2614 |
2282 // check no new requests collide with us... |
2615 // check no new read or write requests collide with us... |
2283 if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount) |
2616 if ((iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount)) || |
2284 || iPageReadRequests.FindCollision(aMemory,aIndex,aCount)) |
2617 iPageReadRequests.FindCollisionContiguous(aMemory,aIndex,aCount)) |
2285 { |
2618 { |
2286 // another operation is colliding with this region, give up and retry... |
2619 // another operation is colliding with this region, give up and retry... |
2287 req->Signal(); |
2620 req->Signal(); |
2288 return 0; // caller expected to retry if needed |
2621 return 0; // caller expected to retry if needed |
2289 } |
2622 } |
2290 |
2623 |
2291 // we have a request object which we can use... |
2624 // we have a request object which we can use... |
2292 req->SetUse(aMemory,aIndex,aCount); |
2625 req->SetUseContiguous(aMemory,aIndex,aCount); |
2293 |
2626 |
2294 NKern::UnlockSystem(); |
2627 NKern::UnlockSystem(); |
2295 return (DPageReadRequest*)req; |
2628 return (DPageReadRequest*)req; |
2296 } |
2629 } |
2297 |
2630 |
2298 |
2631 |
2299 DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2632 DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject** aMemory, TUint* aIndex, TUint aCount) |
2300 { |
2633 { |
|
2634 __NK_ASSERT_DEBUG(iPageWriteRequest); |
|
2635 __NK_ASSERT_DEBUG(PageCleaningLock::IsHeld()); |
|
2636 |
2301 NKern::LockSystem(); |
2637 NKern::LockSystem(); |
2302 |
2638 |
2303 DPagingRequest* req; |
2639 // Collision with existing read requests is not possible here. For a page to be read it must |
2304 |
2640 // not be present, and for it to be written it must be present and dirty. There is no way for a |
2305 for(;;) |
2641 // page to go between these states without an intervening read on an uninitialised (freshly |
2306 { |
2642 // committed) page, which will wait on the first read request. In other words something like |
2307 // get a request object to use... |
2643 // this: |
2308 req = iPageWriteRequests.GetRequest(aMemory,aIndex,aCount); |
2644 // |
2309 |
2645 // read (blocks), decommit, re-commit, read (waits on mutex), write (now no pending reads!) |
2310 if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount)) |
2646 // |
2311 { |
2647 // Note that a read request can be outstanding and appear to collide with this write, but only |
2312 // another write operation is colliding with this region, give up and retry... |
2648 // in the case when the thread making the read has blocked just after acquiring the request but |
2313 req->Signal(); |
2649 // before it checks whether the read is still necessary. This makes it difficult to assert |
2314 // Reacquire the system lock as Signal() above will release it. |
2650 // that no collisions take place. |
2315 NKern::LockSystem(); |
2651 |
2316 continue; |
2652 iPageWriteRequest->SetUseDiscontiguous(aMemory,aIndex,aCount); |
2317 } |
|
2318 |
|
2319 break; |
|
2320 } |
|
2321 |
|
2322 // we have a request object which we can use... |
|
2323 req->SetUse(aMemory,aIndex,aCount); |
|
2324 |
|
2325 NKern::UnlockSystem(); |
2653 NKern::UnlockSystem(); |
2326 return (DPageWriteRequest*)req; |
2654 |
|
2655 return iPageWriteRequest; |
2327 } |
2656 } |
2328 |
2657 |
2329 |
2658 |
2330 DPagingRequestPool::TGroup::TGroup(TUint aNumRequests) |
2659 DPagingRequestPool::TGroup::TGroup(TUint aNumRequests) |
2331 { |
2660 { |
2332 iNumRequests = aNumRequests; |
2661 iNumRequests = aNumRequests; |
2333 iRequests = new DPagingRequest*[aNumRequests]; |
2662 iRequests = new DPoolPagingRequest*[aNumRequests]; |
2334 __NK_ASSERT_ALWAYS(iRequests); |
2663 __NK_ASSERT_ALWAYS(iRequests); |
2335 } |
2664 } |
2336 |
2665 |
2337 |
2666 |
2338 DPagingRequest* DPagingRequestPool::TGroup::FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2667 DPoolPagingRequest* DPagingRequestPool::TGroup::FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2339 { |
2668 { |
2340 __ASSERT_SYSTEM_LOCK; |
2669 __ASSERT_SYSTEM_LOCK; |
2341 DPagingRequest** ptr = iRequests; |
2670 DPoolPagingRequest** ptr = iRequests; |
2342 DPagingRequest** ptrEnd = ptr+iNumRequests; |
2671 DPoolPagingRequest** ptrEnd = ptr+iNumRequests; |
2343 while(ptr<ptrEnd) |
2672 while(ptr<ptrEnd) |
2344 { |
2673 { |
2345 DPagingRequest* req = *ptr++; |
2674 DPoolPagingRequest* req = *ptr++; |
2346 if(req->IsCollision(aMemory,aIndex,aCount)) |
2675 if(req->IsCollisionContiguous(aMemory,aIndex,aCount)) |
2347 return req; |
2676 return req; |
2348 } |
2677 } |
2349 return 0; |
2678 return 0; |
2350 } |
2679 } |
2351 |
2680 |
2352 |
2681 |
2353 static TUint32 RandomSeed = 33333; |
2682 static TUint32 RandomSeed = 33333; |
2354 |
2683 |
2355 DPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2684 DPoolPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2356 { |
2685 { |
2357 __NK_ASSERT_DEBUG(iNumRequests > 0); |
2686 __NK_ASSERT_DEBUG(iNumRequests > 0); |
2358 |
2687 |
2359 // try using an existing request which collides with this region... |
2688 // try using an existing request which collides with this region... |
2360 DPagingRequest* req = FindCollision(aMemory,aIndex,aCount); |
2689 DPoolPagingRequest* req = FindCollisionContiguous(aMemory,aIndex,aCount); |
2361 if(!req) |
2690 if(!req) |
2362 { |
2691 { |
2363 // use a free request... |
2692 // use a free request... |
2364 req = (DPagingRequest*)iFreeList.GetFirst(); |
2693 req = (DPoolPagingRequest*)iFreeList.GetFirst(); |
2365 if(req) |
2694 if(req) |
2366 { |
2695 { |
2367 // free requests aren't being used... |
2696 // free requests aren't being used... |
2368 __NK_ASSERT_DEBUG(req->iUsageCount == 0); |
2697 __NK_ASSERT_DEBUG(req->iUsageCount == 0); |
2369 } |
2698 } |