25 #include "mmanager.h" |
25 #include "mmanager.h" |
26 #include "mptalloc.h" |
26 #include "mptalloc.h" |
27 #include "mpagearray.h" |
27 #include "mpagearray.h" |
28 #include "mswap.h" |
28 #include "mswap.h" |
29 #include "mthrash.h" |
29 #include "mthrash.h" |
|
30 #include "mpagecleaner.h" |
|
31 |
30 #include "cache_maintenance.inl" |
32 #include "cache_maintenance.inl" |
31 |
33 |
32 |
34 |
33 const TUint16 KDefaultYoungOldRatio = 3; |
35 const TUint16 KDefaultYoungOldRatio = 3; |
34 const TUint16 KDefaultMinPages = 256; |
36 const TUint16 KDefaultMinPages = 256; |
35 #ifdef _USE_OLDEST_LISTS |
|
36 const TUint16 KDefaultOldOldestRatio = 3; |
37 const TUint16 KDefaultOldOldestRatio = 3; |
37 #endif |
|
38 |
38 |
39 const TUint KMinOldPages = 1; |
39 const TUint KMinOldPages = 1; |
40 |
40 |
41 /* On a 32 bit system without PAE can't have more than 2^(32-KPageShift) pages. |
41 /* On a 32 bit system without PAE can't have more than 2^(32-KPageShift) pages. |
42 * Subtract 1 so it doesn't overflow when converted to bytes. |
42 * Subtract 1 so it doesn't overflow when converted to bytes. |
43 */ |
43 */ |
44 const TUint KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u; |
44 const TUint KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u; |
45 |
45 |
46 |
46 /* |
|
47 Limit the maximum number of oldest pages to bound the time taken by SelectPagesToClean(), which is |
|
48 called with the MmuLock held. |
|
49 */ |
|
50 const TUint KMaxOldestPages = 32; |
|
51 |
|
52 static DMutex* ThePageCleaningLock = NULL; |
47 |
53 |
48 DPager ThePager; |
54 DPager ThePager; |
49 |
55 |
50 |
56 |
51 DPager::DPager() |
57 DPager::DPager() |
52 : iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0), |
58 : iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0), |
53 iYoungCount(0),iOldCount(0), |
59 iYoungCount(0), iOldCount(0), iOldestCleanCount(0), |
54 #ifdef _USE_OLDEST_LISTS |
|
55 iOldestCleanCount(0), |
|
56 #endif |
|
57 iNumberOfFreePages(0), iReservePageCount(0), iMinimumPageLimit(0) |
60 iNumberOfFreePages(0), iReservePageCount(0), iMinimumPageLimit(0) |
|
61 #ifdef __DEMAND_PAGING_BENCHMARKS__ |
|
62 , iBenchmarkLock(TSpinLock::EOrderGenericIrqHigh3) |
|
63 #endif |
58 { |
64 { |
59 } |
65 } |
60 |
66 |
61 |
67 |
62 void DPager::InitCache() |
68 void DPager::InitCache() |
177 // Verify that the young old ratio can be met even when there is only the |
175 // Verify that the young old ratio can be met even when there is only the |
178 // minimum number of old pages. |
176 // minimum number of old pages. |
179 TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages; |
177 TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages; |
180 __NK_ASSERT_ALWAYS(iYoungOldRatio <= ratioLimit); |
178 __NK_ASSERT_ALWAYS(iYoungOldRatio <= ratioLimit); |
181 |
179 |
182 #ifdef _USE_OLDEST_LISTS |
|
183 // There should always be enough old pages to allow the oldest lists ratio. |
180 // There should always be enough old pages to allow the oldest lists ratio. |
184 TUint oldestCount = minOldAndOldest / (1 + iOldOldestRatio); |
181 TUint oldestCount = minOldAndOldest / (1 + iOldOldestRatio); |
185 __NK_ASSERT_ALWAYS(oldestCount); |
182 __NK_ASSERT_ALWAYS(oldestCount); |
186 #endif |
|
187 |
183 |
188 iNumberOfFreePages = 0; |
184 iNumberOfFreePages = 0; |
189 iNumberOfDirtyPages = 0; |
185 iNumberOfDirtyPages = 0; |
190 |
186 |
191 // Allocate RAM pages and put them all on the old list. |
187 // Allocate RAM pages and put them all on the old list. |
192 // Reserved pages have already been allocated and already placed on the |
188 // Reserved pages have already been allocated and already placed on the |
193 // old list so don't allocate them again. |
189 // old list so don't allocate them again. |
194 RamAllocLock::Lock(); |
190 RamAllocLock::Lock(); |
195 iYoungCount = 0; |
191 iYoungCount = 0; |
196 #ifdef _USE_OLDEST_LISTS |
|
197 iOldCount = 0; |
192 iOldCount = 0; |
198 iOldestDirtyCount = 0; |
193 iOldestDirtyCount = 0; |
199 __NK_ASSERT_DEBUG(iOldestCleanCount == iReservePageCount); |
194 __NK_ASSERT_DEBUG(iOldestCleanCount == iReservePageCount); |
200 #else |
|
201 __NK_ASSERT_DEBUG(iOldCount == iReservePageCount); |
|
202 #endif |
|
203 Mmu& m = TheMmu; |
195 Mmu& m = TheMmu; |
204 for(TUint i = iReservePageCount; i < iMinimumPageCount; i++) |
196 for(TUint i = iReservePageCount; i < iMinimumPageCount; i++) |
205 { |
197 { |
206 // Allocate a single page |
198 // Allocate a single page |
207 TPhysAddr pagePhys; |
199 TPhysAddr pagePhys; |
248 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
236 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
249 if (!CheckList(&iOldList.iA, iOldCount)) |
237 if (!CheckList(&iOldList.iA, iOldCount)) |
250 return EFalse; |
238 return EFalse; |
251 if (!CheckList(&iYoungList.iA, iYoungCount)) |
239 if (!CheckList(&iYoungList.iA, iYoungCount)) |
252 return EFalse; |
240 return EFalse; |
253 |
|
254 #ifdef _USE_OLDEST_LISTS |
|
255 if (!CheckList(&iOldestCleanList.iA, iOldestCleanCount)) |
241 if (!CheckList(&iOldestCleanList.iA, iOldestCleanCount)) |
256 return EFalse; |
242 return EFalse; |
257 if (!CheckList(&iOldestDirtyList.iA, iOldestDirtyCount)) |
243 if (!CheckList(&iOldestDirtyList.iA, iOldestDirtyCount)) |
258 return EFalse; |
244 return EFalse; |
259 TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d", iYoungCount, iOldCount, |
245 TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d", iYoungCount, iOldCount, |
260 iOldestCleanCount, iOldestDirtyCount, iNumberOfFreePages)); |
246 iOldestCleanCount, iOldestDirtyCount, iNumberOfFreePages)); |
261 #else |
|
262 TRACEP(("DP: y=%d o=%d f=%d", iYoungCount, iOldCount, iNumberOfFreePages)); |
|
263 #endif //#ifdef _USE_OLDEST_LISTS |
|
264 TraceCounts(); |
247 TraceCounts(); |
265 #endif // #ifdef FMM_PAGER_CHECK_LISTS |
248 #endif // #ifdef FMM_PAGER_CHECK_LISTS |
266 return true; |
249 return true; |
267 } |
250 } |
268 |
251 |
269 void DPager::TraceCounts() |
252 void DPager::TraceCounts() |
270 { |
253 { |
271 #ifdef _USE_OLDEST_LISTS |
|
272 TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d min=%d max=%d ml=%d res=%d", |
254 TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d min=%d max=%d ml=%d res=%d", |
273 iYoungCount, iOldCount, iOldestCleanCount, iOldestDirtyCount, |
255 iYoungCount, iOldCount, iOldestCleanCount, iOldestDirtyCount, |
274 iNumberOfFreePages, iMinimumPageCount, iMaximumPageCount, |
256 iNumberOfFreePages, iMinimumPageCount, iMaximumPageCount, |
275 iMinimumPageLimit, iReservePageCount)); |
257 iMinimumPageLimit, iReservePageCount)); |
276 #else |
|
277 TRACEP(("DP: y=%d o=%d f=%d min=%d max=%d ml=%d res=%d", |
|
278 iYoungCount, iOldCount, iNumberOfFreePages, iMinimumPageCount, |
|
279 iMaximumPageCount, iMinimumPageLimit, iReservePageCount)); |
|
280 #endif //#ifdef _USE_OLDEST_LISTS |
|
281 } |
258 } |
282 #endif //#ifdef _DEBUG |
259 #endif //#ifdef _DEBUG |
283 |
260 |
284 |
261 |
285 TBool DPager::HaveTooManyPages() |
262 TBool DPager::HaveTooManyPages() |
390 return KErrNotFound; |
359 return KErrNotFound; |
391 } |
360 } |
392 |
361 |
393 // Update the dirty page count as required... |
362 // Update the dirty page count as required... |
394 if (aPageInfo->IsDirty()) |
363 if (aPageInfo->IsDirty()) |
|
364 { |
|
365 aPageInfo->SetReadOnly(); |
395 SetClean(*aPageInfo); |
366 SetClean(*aPageInfo); |
|
367 } |
396 |
368 |
397 if (iNumberOfFreePages > 0) |
369 if (iNumberOfFreePages > 0) |
398 {// The paging cache is not at the minimum size so safe to let the |
370 {// The paging cache is not at the minimum size so safe to let the |
399 // ram allocator free this page. |
371 // ram allocator free this page. |
400 iNumberOfFreePages--; |
372 iNumberOfFreePages--; |
401 aPageInfo->SetPagedState(SPageInfo::EUnpaged); |
373 aPageInfo->SetPagedState(SPageInfo::EUnpaged); |
402 return KErrCompletion; |
374 return KErrCompletion; |
403 } |
375 } |
404 // Need to hold onto this page as have reached the page cache limit. |
376 // Need to hold onto this page as have reached the page cache limit. |
405 // add as oldest page... |
377 // add as oldest page... |
406 #ifdef _USE_OLDEST_LISTS |
|
407 aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean); |
378 aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean); |
408 iOldestCleanList.Add(&aPageInfo->iLink); |
379 iOldestCleanList.Add(&aPageInfo->iLink); |
409 ++iOldestCleanCount; |
380 ++iOldestCleanCount; |
410 #else |
|
411 aPageInfo->SetPagedState(SPageInfo::EPagedOld); |
|
412 iOldList.Add(&aPageInfo->iLink); |
|
413 ++iOldCount; |
|
414 #endif |
|
415 |
381 |
416 return KErrNone; |
382 return KErrNone; |
417 } |
383 } |
418 |
384 |
419 |
385 |
519 break; |
483 break; |
520 } |
484 } |
521 } |
485 } |
522 |
486 |
523 |
487 |
524 SPageInfo* DPager::StealOldestPage() |
488 TInt DPager::TryStealOldestPage(SPageInfo*& aPageInfoOut) |
525 { |
489 { |
526 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
490 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
527 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
491 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
528 |
492 |
|
493 // find oldest page in list... |
|
494 SDblQueLink* link; |
|
495 if (iOldestCleanCount) |
|
496 { |
|
497 __NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty()); |
|
498 link = iOldestCleanList.Last(); |
|
499 } |
|
500 else if (iOldestDirtyCount) |
|
501 { |
|
502 __NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty()); |
|
503 link = iOldestDirtyList.Last(); |
|
504 } |
|
505 else if (iOldCount) |
|
506 { |
|
507 __NK_ASSERT_DEBUG(!iOldList.IsEmpty()); |
|
508 link = iOldList.Last(); |
|
509 } |
|
510 else |
|
511 { |
|
512 __NK_ASSERT_DEBUG(iYoungCount); |
|
513 __NK_ASSERT_ALWAYS(!iYoungList.IsEmpty()); |
|
514 link = iYoungList.Last(); |
|
515 } |
|
516 SPageInfo* pageInfo = SPageInfo::FromLink(link); |
|
517 |
|
518 if (pageInfo->IsDirty() && !PageCleaningLock::IsHeld()) |
|
519 return 1; |
|
520 |
|
521 // try to steal it from owning object... |
|
522 TInt r = StealPage(pageInfo); |
|
523 if (r == KErrNone) |
|
524 { |
|
525 BalanceAges(); |
|
526 aPageInfoOut = pageInfo; |
|
527 } |
|
528 |
|
529 return r; |
|
530 } |
|
531 |
|
532 |
|
533 SPageInfo* DPager::StealOldestPage() |
|
534 { |
|
535 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
536 TBool pageCleaningLockHeld = EFalse; |
529 for(;;) |
537 for(;;) |
530 { |
538 { |
531 // find oldest page in list... |
539 SPageInfo* pageInfo = NULL; |
532 SDblQueLink* link; |
540 TInt r = TryStealOldestPage(pageInfo); |
533 #ifdef _USE_OLDEST_LISTS |
541 |
534 if (iOldestCleanCount) |
542 if (r == KErrNone) |
535 { |
543 { |
536 __NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty()); |
544 if (pageCleaningLockHeld) |
537 link = iOldestCleanList.Last(); |
545 { |
538 } |
546 MmuLock::Unlock(); |
539 else if (iOldestDirtyCount) |
547 PageCleaningLock::Unlock(); |
540 { |
548 MmuLock::Lock(); |
541 __NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty()); |
549 } |
542 link = iOldestDirtyList.Last(); |
550 return pageInfo; |
543 } |
551 } |
544 else if (iOldCount) |
552 else if (r == 1) |
|
553 { |
|
554 __NK_ASSERT_ALWAYS(!pageCleaningLockHeld); |
|
555 MmuLock::Unlock(); |
|
556 PageCleaningLock::Lock(); |
|
557 MmuLock::Lock(); |
|
558 pageCleaningLockHeld = ETrue; |
|
559 } |
|
560 // else retry... |
|
561 } |
|
562 } |
|
563 |
|
564 #ifdef __CPU_CACHE_HAS_COLOUR |
|
565 |
|
566 template <class T, TInt maxObjects> class TSequentialColourSelector |
|
567 { |
|
568 public: |
|
569 static const TInt KMaxLength = maxObjects; |
|
570 static const TInt KArrayLength = _ALIGN_UP(KMaxLength, KPageColourCount); |
|
571 |
|
572 FORCE_INLINE TSequentialColourSelector() |
|
573 { |
|
574 memclr(this, sizeof(*this)); |
|
575 } |
|
576 |
|
577 FORCE_INLINE TBool FoundLongestSequence() |
|
578 { |
|
579 return iLongestLength >= KMaxLength; |
|
580 } |
|
581 |
|
582 FORCE_INLINE void AddCandidate(T* aObject, TInt aColour) |
|
583 { |
|
584 // allocate objects to slots based on colour |
|
585 for (TInt i = aColour ; i < KArrayLength ; i += KPageColourCount) |
|
586 { |
|
587 if (!iSlot[i]) |
|
588 { |
|
589 iSlot[i] = aObject; |
|
590 iSeqLength[i] = i == 0 ? 1 : iSeqLength[i - 1] + 1; |
|
591 TInt j = i + 1; |
|
592 while(j < KArrayLength && iSeqLength[j]) |
|
593 iSeqLength[j++] += iSeqLength[i]; |
|
594 TInt currentLength = iSeqLength[j - 1]; |
|
595 if (currentLength > iLongestLength) |
|
596 { |
|
597 iLongestLength = currentLength; |
|
598 iLongestStart = j - currentLength; |
|
599 } |
|
600 break; |
|
601 } |
|
602 } |
|
603 } |
|
604 |
|
605 FORCE_INLINE TInt FindLongestRun(T** aObjectsOut) |
|
606 { |
|
607 if (iLongestLength == 0) |
|
608 return 0; |
|
609 |
|
610 if (iLongestLength < KMaxLength && iSlot[0] && iSlot[KArrayLength - 1]) |
|
611 { |
|
612 // check possibility of wrapping |
|
613 |
|
614 TInt i = 1; |
|
615 while (iSlot[i]) ++i; // find first hole |
|
616 TInt wrappedLength = iSeqLength[KArrayLength - 1] + iSeqLength[i - 1]; |
|
617 if (wrappedLength > iLongestLength) |
|
618 { |
|
619 iLongestLength = wrappedLength; |
|
620 iLongestStart = KArrayLength - iSeqLength[KArrayLength - 1]; |
|
621 } |
|
622 } |
|
623 |
|
624 iLongestLength = Min(iLongestLength, KMaxLength); |
|
625 |
|
626 __NK_ASSERT_DEBUG(iLongestStart >= 0 && iLongestStart < KArrayLength); |
|
627 __NK_ASSERT_DEBUG(iLongestStart + iLongestLength < 2 * KArrayLength); |
|
628 |
|
629 TInt len = Min(iLongestLength, KArrayLength - iLongestStart); |
|
630 wordmove(aObjectsOut, &iSlot[iLongestStart], len * sizeof(T*)); |
|
631 wordmove(aObjectsOut + len, &iSlot[0], (iLongestLength - len) * sizeof(T*)); |
|
632 |
|
633 return iLongestLength; |
|
634 } |
|
635 |
|
636 private: |
|
637 T* iSlot[KArrayLength]; |
|
638 TInt8 iSeqLength[KArrayLength]; |
|
639 TInt iLongestStart; |
|
640 TInt iLongestLength; |
|
641 }; |
|
642 |
|
643 TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut) |
|
644 { |
|
645 // select up to KMaxPagesToClean oldest dirty pages with sequential page colours |
|
646 |
|
647 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
648 |
|
649 TSequentialColourSelector<SPageInfo, KMaxPagesToClean> selector; |
|
650 |
|
651 SDblQueLink* link = iOldestDirtyList.Last(); |
|
652 while (link != &iOldestDirtyList.iA) |
|
653 { |
|
654 SPageInfo* pi = SPageInfo::FromLink(link); |
|
655 if (!pi->IsWritable()) |
|
656 { |
|
657 // the page may be in the process of being restricted, stolen or decommitted, but don't |
|
658 // check for this as it will occur infrequently and will be detected by CheckModified |
|
659 // anyway |
|
660 TInt colour = pi->Index() & KPageColourMask; |
|
661 selector.AddCandidate(pi, colour); |
|
662 if (selector.FoundLongestSequence()) |
|
663 break; |
|
664 } |
|
665 link = link->iPrev; |
|
666 } |
|
667 |
|
668 return selector.FindLongestRun(aPageInfosOut); |
|
669 } |
|
670 |
545 #else |
671 #else |
546 if (iOldCount) |
672 |
|
673 TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut) |
|
674 { |
|
675 // no page colouring restrictions, so just take up to KMaxPagesToClean oldest dirty pages |
|
676 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
677 TInt pageCount = 0; |
|
678 SDblQueLink* link = iOldestDirtyList.Last(); |
|
679 while (link != &iOldestDirtyList.iA && pageCount < KMaxPagesToClean) |
|
680 { |
|
681 SPageInfo* pi = SPageInfo::FromLink(link); |
|
682 if (!pi->IsWritable()) |
|
683 { |
|
684 // the page may be in the process of being restricted, stolen or decommitted, but don't |
|
685 // check for this as it will occur infrequently and will be detected by CheckModified |
|
686 // anyway |
|
687 aPageInfosOut[pageCount++] = pi; |
|
688 } |
|
689 link = link->iPrev; |
|
690 } |
|
691 return pageCount; |
|
692 } |
|
693 |
547 #endif |
694 #endif |
548 { |
695 |
549 __NK_ASSERT_DEBUG(!iOldList.IsEmpty()); |
696 |
550 link = iOldList.Last(); |
697 TInt DPager::CleanSomePages(TBool aBackground) |
551 } |
698 { |
552 else |
699 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
553 { |
700 __NK_ASSERT_DEBUG(PageCleaningLock::IsHeld()); |
554 __NK_ASSERT_DEBUG(iYoungCount); |
701 // ram alloc lock may or may not be held |
555 __NK_ASSERT_ALWAYS(!iYoungList.IsEmpty()); |
702 |
556 link = iYoungList.Last(); |
703 SPageInfo* pageInfos[KMaxPagesToClean]; |
557 } |
704 TInt pageCount = SelectPagesToClean(&pageInfos[0]); |
558 SPageInfo* pageInfo = SPageInfo::FromLink(link); |
705 |
559 |
706 if (pageCount == 0) |
560 // steal it from owning object... |
707 return 0; |
561 TInt r = StealPage(pageInfo); |
708 |
562 |
709 TheDataPagedMemoryManager->CleanPages(pageCount, pageInfos, aBackground); |
563 BalanceAges(); |
710 |
564 |
711 for (TInt i = 0 ; i < pageCount ; ++i) |
565 if(r==KErrNone) |
712 { |
566 return pageInfo; // done |
713 SPageInfo* pi = pageInfos[i]; |
567 |
714 if (pi) |
568 // loop back and try again |
715 { |
569 } |
716 __NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EPagedOldestDirty && iOldestDirtyCount); |
|
717 __NK_ASSERT_DEBUG(!pi->IsDirty() && !pi->IsWritable()); |
|
718 |
|
719 pi->iLink.Deque(); |
|
720 iOldestCleanList.AddHead(&pi->iLink); |
|
721 --iOldestDirtyCount; |
|
722 ++iOldestCleanCount; |
|
723 pi->SetPagedState(SPageInfo::EPagedOldestClean); |
|
724 } |
|
725 } |
|
726 |
|
727 return pageCount; |
|
728 } |
|
729 |
|
730 |
|
731 TBool DPager::HasPagesToClean() |
|
732 { |
|
733 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
734 return iOldestDirtyCount > 0; |
570 } |
735 } |
571 |
736 |
572 |
737 |
573 TInt DPager::RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction) |
738 TInt DPager::RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction) |
574 { |
739 { |
645 TRACE(("DPager::StealPage returns %d",r)); |
810 TRACE(("DPager::StealPage returns %d",r)); |
646 return r; |
811 return r; |
647 } |
812 } |
648 |
813 |
649 |
814 |
|
815 TInt DPager::DiscardAndAllocPage(SPageInfo* aPageInfo, TZonePageType aPageType) |
|
816 { |
|
817 TInt r = DiscardPage(aPageInfo, KRamZoneInvalidId, EFalse); |
|
818 if (r == KErrNone) |
|
819 { |
|
820 TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType); |
|
821 } |
|
822 // Flash the ram alloc lock as we may have had to write a page out to swap. |
|
823 RamAllocLock::Unlock(); |
|
824 RamAllocLock::Lock(); |
|
825 return r; |
|
826 } |
|
827 |
|
828 |
|
829 static TBool DiscardCanStealPage(SPageInfo* aOldPageInfo, TBool aBlockRest) |
|
830 { |
|
831 // If the page is pinned or if the page is dirty and a general defrag is being performed then |
|
832 // don't attempt to steal it |
|
833 return aOldPageInfo->Type() == SPageInfo::EUnused || |
|
834 (aOldPageInfo->PagedState() != SPageInfo::EPagedPinned && (!aBlockRest || !aOldPageInfo->IsDirty())); |
|
835 } |
|
836 |
|
837 |
650 TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest) |
838 TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest) |
651 { |
839 { |
|
840 // todo: assert MmuLock not released |
|
841 |
|
842 TRACE(("> DPager::DiscardPage %08x", aOldPageInfo)); |
|
843 |
652 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
844 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
653 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
845 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
654 |
846 |
655 TInt r; |
847 if (!DiscardCanStealPage(aOldPageInfo, aBlockRest)) |
656 // If the page is pinned or if the page is dirty and a general defrag is being |
848 { |
657 // performed then don't attempt to steal it. |
849 // The page is pinned or is dirty and this is a general defrag so move the page. |
658 if (aOldPageInfo->Type() != SPageInfo::EUnused && |
|
659 (aOldPageInfo->PagedState() == SPageInfo::EPagedPinned || |
|
660 (aBlockRest && aOldPageInfo->IsDirty()))) |
|
661 {// The page is pinned or is dirty and this is a general defrag so move the page. |
|
662 DMemoryObject* memory = aOldPageInfo->Owner(); |
850 DMemoryObject* memory = aOldPageInfo->Owner(); |
663 // Page must be managed if it is pinned or dirty. |
851 // Page must be managed if it is pinned or dirty. |
664 __NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged); |
852 __NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged); |
665 __NK_ASSERT_DEBUG(memory); |
853 __NK_ASSERT_DEBUG(memory); |
666 MmuLock::Unlock(); |
854 MmuLock::Unlock(); |
667 TPhysAddr newAddr; |
855 TPhysAddr newAddr; |
668 return memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest); |
856 TRACE2(("DPager::DiscardPage delegating pinned/dirty page to manager")); |
669 } |
857 TInt r = memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest); |
670 |
858 TRACE(("< DPager::DiscardPage %d", r)); |
671 if (!iNumberOfFreePages) |
859 return r; |
672 { |
860 } |
673 // Allocate a new page for the live list as it has reached its minimum size. |
861 |
|
862 TInt r = KErrNone; |
|
863 SPageInfo* newPageInfo = NULL; |
|
864 TBool havePageCleaningLock = EFalse; |
|
865 |
|
866 TBool needNewPage; |
|
867 TBool needPageCleaningLock; |
|
868 while(needNewPage = (iNumberOfFreePages == 0 && newPageInfo == NULL), |
|
869 needPageCleaningLock = (aOldPageInfo->IsDirty() && !havePageCleaningLock), |
|
870 needNewPage || needPageCleaningLock) |
|
871 { |
674 MmuLock::Unlock(); |
872 MmuLock::Unlock(); |
675 SPageInfo* newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe), |
873 |
676 aBlockZoneId, aBlockRest); |
874 if (needNewPage) |
677 if (!newPageInfo) |
875 { |
678 return KErrNoMemory; |
876 // Allocate a new page for the live list as it has reached its minimum size. |
|
877 TUint flags = EMemAttNormalCached | Mmu::EAllocNoWipe; |
|
878 newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)flags, aBlockZoneId, aBlockRest); |
|
879 if (!newPageInfo) |
|
880 { |
|
881 TRACE(("< DPager::DiscardPage KErrNoMemory")); |
|
882 r = KErrNoMemory; |
|
883 MmuLock::Lock(); |
|
884 break; |
|
885 } |
|
886 } |
|
887 |
|
888 if (needPageCleaningLock) |
|
889 { |
|
890 // Acquire the page cleaning mutex so StealPage can clean it |
|
891 PageCleaningLock::Lock(); |
|
892 havePageCleaningLock = ETrue; |
|
893 } |
679 |
894 |
680 // Re-acquire the mmulock and re-check that the page is not pinned or dirty. |
895 // Re-acquire the mmulock and re-check that the page is not pinned or dirty. |
681 MmuLock::Lock(); |
896 MmuLock::Lock(); |
682 if (aOldPageInfo->Type() != SPageInfo::EUnused && |
897 if (!DiscardCanStealPage(aOldPageInfo, aBlockRest)) |
683 (aOldPageInfo->PagedState() == SPageInfo::EPagedPinned || |
898 { |
684 (aBlockRest && aOldPageInfo->IsDirty()))) |
899 // Page is now pinned or dirty so give up as it is in use. |
685 {// Page is now pinned or dirty so give up as it is inuse. |
900 r = KErrInUse; |
686 ReturnPageToSystem(*newPageInfo); |
901 break; |
687 MmuLock::Unlock(); |
902 } |
688 return KErrInUse; |
903 } |
689 } |
904 |
690 |
905 if (r == KErrNone) |
|
906 { |
691 // Attempt to steal the page |
907 // Attempt to steal the page |
692 r = StealPage(aOldPageInfo); |
908 r = StealPage(aOldPageInfo); // temporarily releases MmuLock if page is dirty |
693 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
909 } |
694 |
910 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
695 if (r == KErrCompletion) |
911 |
696 {// This was a page table that has been freed but added to the |
912 if (r == KErrCompletion) |
697 // live list as a free page. Remove from live list and continue. |
913 {// This was a page table that has been freed but added to the |
698 __NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty()); |
914 // live list as a free page. Remove from live list and continue. |
699 RemovePage(aOldPageInfo); |
915 __NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty()); |
700 r = KErrNone; |
916 RemovePage(aOldPageInfo); |
701 } |
917 r = KErrNone; |
702 |
918 } |
703 if (r == KErrNone) |
919 |
704 {// Add the new page to the live list as discarding the old page |
920 if (r == KErrNone && iNumberOfFreePages == 0) |
705 // will reduce the live list below the minimum. |
921 { |
|
922 if (newPageInfo) |
|
923 { |
|
924 // Add a new page to the live list if we have one as discarding the old page will reduce |
|
925 // the live list below the minimum. |
706 AddAsFreePage(newPageInfo); |
926 AddAsFreePage(newPageInfo); |
707 // We've successfully discarded the page so return it to the free pool. |
927 newPageInfo = NULL; |
708 ReturnPageToSystem(*aOldPageInfo); |
928 } |
709 BalanceAges(); |
929 else |
710 } |
930 { |
711 else |
931 // Otherwise the live list shrank when page was being cleaned so have to give up |
712 { |
932 AddAsFreePage(aOldPageInfo); |
713 // New page not required so just return it to the system. This is safe as |
933 BalanceAges(); // temporarily releases MmuLock |
714 // iNumberOfFreePages will have this page counted but as it is not on the live list |
934 r = KErrInUse; |
715 // noone else can touch it. |
935 } |
716 ReturnPageToSystem(*newPageInfo); |
936 } |
717 } |
937 |
718 } |
938 if (r == KErrNone) |
719 else |
939 { |
720 { |
940 // We've successfully discarded the page and ensured the live list is large enough, so |
721 // Attempt to steal the page |
941 // return it to the free pool. |
722 r = StealPage(aOldPageInfo); |
942 ReturnPageToSystem(*aOldPageInfo); // temporarily releases MmuLock |
723 |
943 BalanceAges(); // temporarily releases MmuLock |
724 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
944 } |
725 |
945 |
726 if (r == KErrCompletion) |
946 if (newPageInfo) |
727 {// This was a page table that has been freed but added to the |
947 { |
728 // live list as a free page. Remove from live list. |
948 // New page not required so just return it to the system. This is safe as |
729 __NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty()); |
949 // iNumberOfFreePages will have this page counted but as it is not on the live list noone |
730 RemovePage(aOldPageInfo); |
950 // else can touch it. |
731 r = KErrNone; |
951 if (iNumberOfFreePages == 0) |
732 } |
952 AddAsFreePage(newPageInfo); |
733 |
953 else |
734 if (r == KErrNone) |
954 ReturnPageToSystem(*newPageInfo); // temporarily releases MmuLock |
735 {// We've successfully discarded the page so return it to the free pool. |
955 } |
736 ReturnPageToSystem(*aOldPageInfo); |
956 |
737 BalanceAges(); |
957 if (havePageCleaningLock) |
738 } |
958 { |
739 } |
959 // Release the page cleaning mutex |
|
960 MmuLock::Unlock(); |
|
961 PageCleaningLock::Unlock(); |
|
962 MmuLock::Lock(); |
|
963 } |
|
964 |
740 MmuLock::Unlock(); |
965 MmuLock::Unlock(); |
|
966 TRACE(("< DPager::DiscardPage returns %d", r)); |
741 return r; |
967 return r; |
742 } |
968 } |
743 |
969 |
744 |
970 |
745 TBool DPager::TryGrowLiveList() |
971 TBool DPager::TryGrowLiveList() |
841 if(pageInfo) |
1064 if(pageInfo) |
842 goto done; |
1065 goto done; |
843 MmuLock::Lock(); |
1066 MmuLock::Lock(); |
844 } |
1067 } |
845 |
1068 |
|
1069 // try stealing a clean page... |
|
1070 if (iOldestCleanCount) |
|
1071 goto try_steal_oldest_page; |
|
1072 |
|
1073 // see if we can clean multiple dirty pages in one go... |
|
1074 if (KMaxPagesToClean > 1 && iOldestDirtyCount > 1) |
|
1075 { |
|
1076 // if we don't hold the page cleaning mutex then temporarily release ram alloc mutex and |
|
1077 // acquire page cleaning mutex; if we hold it already just proceed |
|
1078 if (!pageCleaningLockHeld) |
|
1079 { |
|
1080 MmuLock::Unlock(); |
|
1081 RamAllocLock::Unlock(); |
|
1082 PageCleaningLock::Lock(); |
|
1083 MmuLock::Lock(); |
|
1084 } |
|
1085 |
|
1086 // there may be clean pages now if we've waited on the page cleaning mutex, if so don't |
|
1087 // bother cleaning but just restart |
|
1088 if (iOldestCleanCount == 0) |
|
1089 CleanSomePages(EFalse); |
|
1090 |
|
1091 if (!pageCleaningLockHeld) |
|
1092 { |
|
1093 MmuLock::Unlock(); |
|
1094 PageCleaningLock::Unlock(); |
|
1095 RamAllocLock::Lock(); |
|
1096 MmuLock::Lock(); |
|
1097 } |
|
1098 |
|
1099 if (iOldestCleanCount > 0) |
|
1100 goto find_a_page; |
|
1101 } |
|
1102 |
846 // as a last resort, steal a page from the live list... |
1103 // as a last resort, steal a page from the live list... |
847 get_oldest: |
1104 |
848 #ifdef _USE_OLDEST_LISTS |
1105 try_steal_oldest_page: |
849 __NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount); |
1106 __NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount); |
850 #else |
1107 r = TryStealOldestPage(pageInfo); |
851 __NK_ASSERT_ALWAYS(iOldCount|iYoungCount); |
1108 // if this fails we restart whole process |
852 #endif |
1109 if (r < KErrNone) |
853 pageInfo = StealOldestPage(); |
1110 goto find_a_page; |
|
1111 |
|
1112 // if we need to clean, acquire page cleaning mutex for life of this function |
|
1113 if (r == 1) |
|
1114 { |
|
1115 __NK_ASSERT_ALWAYS(!pageCleaningLockHeld); |
|
1116 MmuLock::Unlock(); |
|
1117 PageCleaningLock::Lock(); |
|
1118 MmuLock::Lock(); |
|
1119 pageCleaningLockHeld = ETrue; |
|
1120 goto find_a_page; |
|
1121 } |
|
1122 |
|
1123 // otherwise we're done! |
|
1124 __NK_ASSERT_DEBUG(r == KErrNone); |
854 MmuLock::Unlock(); |
1125 MmuLock::Unlock(); |
855 |
1126 |
856 // make page state same as a freshly allocated page... |
1127 // make page state same as a freshly allocated page... |
857 pagePhys = pageInfo->PhysAddr(); |
1128 pagePhys = pageInfo->PhysAddr(); |
858 TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags); |
1129 TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags); |
859 |
1130 |
860 done: |
1131 done: |
|
1132 if (pageCleaningLockHeld) |
|
1133 PageCleaningLock::Unlock(); |
861 RamAllocLock::Unlock(); |
1134 RamAllocLock::Unlock(); |
|
1135 |
862 return pageInfo; |
1136 return pageInfo; |
863 } |
1137 } |
864 |
1138 |
865 |
1139 |
866 TBool DPager::GetFreePages(TInt aNumPages) |
1140 TBool DPager::GetFreePages(TInt aNumPages) |
2094 #ifdef __DEMAND_PAGING_BENCHMARKS__ |
2347 #ifdef __DEMAND_PAGING_BENCHMARKS__ |
2095 |
2348 |
2096 void DPager::ResetBenchmarkData(TPagingBenchmark aBm) |
2349 void DPager::ResetBenchmarkData(TPagingBenchmark aBm) |
2097 { |
2350 { |
2098 SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm]; |
2351 SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm]; |
|
2352 __SPIN_LOCK_IRQ(iBenchmarkLock); |
2099 info.iCount = 0; |
2353 info.iCount = 0; |
2100 info.iTotalTime = 0; |
2354 info.iTotalTime = 0; |
2101 info.iMaxTime = 0; |
2355 info.iMaxTime = 0; |
2102 info.iMinTime = KMaxTInt; |
2356 info.iMinTime = KMaxTInt; |
|
2357 __SPIN_UNLOCK_IRQ(iBenchmarkLock); |
2103 } |
2358 } |
2104 |
2359 |
2105 void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime) |
2360 void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime, TUint aCount) |
2106 { |
2361 { |
2107 SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm]; |
2362 SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm]; |
2108 ++info.iCount; |
|
2109 #if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP) |
2363 #if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP) |
2110 TInt64 elapsed = aEndTime - aStartTime; |
2364 TInt64 elapsed = aEndTime - aStartTime; |
2111 #else |
2365 #else |
2112 TInt64 elapsed = aStartTime - aEndTime; |
2366 TInt64 elapsed = aStartTime - aEndTime; |
2113 #endif |
2367 #endif |
|
2368 __SPIN_LOCK_IRQ(iBenchmarkLock); |
|
2369 info.iCount += aCount; |
2114 info.iTotalTime += elapsed; |
2370 info.iTotalTime += elapsed; |
2115 if (elapsed > info.iMaxTime) |
2371 if (elapsed > info.iMaxTime) |
2116 info.iMaxTime = elapsed; |
2372 info.iMaxTime = elapsed; |
2117 if (elapsed < info.iMinTime) |
2373 if (elapsed < info.iMinTime) |
2118 info.iMinTime = elapsed; |
2374 info.iMinTime = elapsed; |
|
2375 __SPIN_UNLOCK_IRQ(iBenchmarkLock); |
2119 } |
2376 } |
|
2377 |
|
2378 void DPager::ReadBenchmarkData(TPagingBenchmark aBm, SPagingBenchmarkInfo& aDataOut) |
|
2379 { |
|
2380 __SPIN_LOCK_IRQ(iBenchmarkLock); |
|
2381 aDataOut = iBenchmarkInfo[aBm]; |
|
2382 __SPIN_UNLOCK_IRQ(iBenchmarkLock); |
|
2383 } |
2120 |
2384 |
2121 #endif //__DEMAND_PAGING_BENCHMARKS__ |
2385 #endif //__DEMAND_PAGING_BENCHMARKS__ |
2122 |
2386 |
2123 |
2387 |
2124 // |
2388 // |
2127 |
2391 |
2128 // |
2392 // |
2129 // DPagingRequest |
2393 // DPagingRequest |
2130 // |
2394 // |
2131 |
2395 |
2132 DPagingRequest::DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup) |
2396 DPagingRequest::DPagingRequest() |
2133 : iPoolGroup(aPoolGroup), iUseRegionMemory(0), iUseRegionIndex(0), iUseRegionCount(0) |
2397 : iMutex(NULL), iUseRegionCount(0) |
2134 { |
2398 { |
2135 } |
2399 } |
2136 |
2400 |
2137 |
2401 |
2138 FORCE_INLINE void DPagingRequest::SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2402 void DPagingRequest::SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2139 { |
2403 { |
2140 __ASSERT_SYSTEM_LOCK; |
2404 __ASSERT_SYSTEM_LOCK; |
2141 iUseRegionMemory = aMemory; |
2405 __NK_ASSERT_DEBUG(iUseRegionCount == 0); |
2142 iUseRegionIndex = aIndex; |
2406 __NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages); |
|
2407 for (TUint i = 0 ; i < aCount ; ++i) |
|
2408 { |
|
2409 iUseRegionMemory[i] = aMemory; |
|
2410 iUseRegionIndex[i] = aIndex + i; |
|
2411 } |
2143 iUseRegionCount = aCount; |
2412 iUseRegionCount = aCount; |
2144 } |
2413 } |
2145 |
2414 |
2146 |
2415 |
2147 TBool DPagingRequest::CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2416 void DPagingRequest::SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount) |
2148 { |
2417 { |
2149 return aMemory==iUseRegionMemory |
2418 __ASSERT_SYSTEM_LOCK; |
2150 && TUint(aIndex-iUseRegionIndex) < iUseRegionCount |
2419 __NK_ASSERT_DEBUG(iUseRegionCount == 0); |
2151 && TUint(iUseRegionCount-TUint(aIndex-iUseRegionIndex)) <= aCount; |
2420 __NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages); |
2152 } |
2421 for (TUint i = 0 ; i < aCount ; ++i) |
2153 |
2422 { |
2154 |
2423 iUseRegionMemory[i] = aMemory[i]; |
2155 void DPagingRequest::Release() |
2424 iUseRegionIndex[i] = aIndex[i]; |
|
2425 } |
|
2426 iUseRegionCount = aCount; |
|
2427 } |
|
2428 |
|
2429 |
|
2430 void DPagingRequest::ResetUse() |
|
2431 { |
|
2432 __ASSERT_SYSTEM_LOCK; |
|
2433 __NK_ASSERT_DEBUG(iUseRegionCount > 0); |
|
2434 iUseRegionCount = 0; |
|
2435 } |
|
2436 |
|
2437 |
|
2438 TBool DPagingRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
2439 { |
|
2440 if (iUseRegionCount != aCount) |
|
2441 return EFalse; |
|
2442 for (TUint i = 0 ; i < iUseRegionCount ; ++i) |
|
2443 { |
|
2444 if (iUseRegionMemory[i] != aMemory || iUseRegionIndex[i] != aIndex + i) |
|
2445 return EFalse; |
|
2446 } |
|
2447 return ETrue; |
|
2448 } |
|
2449 |
|
2450 |
|
2451 TBool DPagingRequest::CheckUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount) |
|
2452 { |
|
2453 if (iUseRegionCount != aCount) |
|
2454 return EFalse; |
|
2455 for (TUint i = 0 ; i < iUseRegionCount ; ++i) |
|
2456 { |
|
2457 if (iUseRegionMemory[i] != aMemory[i] || iUseRegionIndex[i] != aIndex[i]) |
|
2458 return EFalse; |
|
2459 } |
|
2460 return ETrue; |
|
2461 } |
|
2462 |
|
2463 |
|
2464 TBool DPagingRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
2465 { |
|
2466 // note this could be optimised as most of the time we will be checking read/read collisions, |
|
2467 // both of which will be contiguous |
|
2468 __ASSERT_SYSTEM_LOCK; |
|
2469 for (TUint i = 0 ; i < iUseRegionCount ; ++i) |
|
2470 { |
|
2471 if (iUseRegionMemory[i] == aMemory && |
|
2472 TUint(iUseRegionIndex[i] - aIndex) < aCount) |
|
2473 return ETrue; |
|
2474 } |
|
2475 return EFalse; |
|
2476 } |
|
2477 |
|
2478 |
|
2479 TLinAddr DPagingRequest::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages) |
|
2480 { |
|
2481 __NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread()); |
|
2482 return iTempMapping.Map(aPages,aCount,aColour); |
|
2483 } |
|
2484 |
|
2485 |
|
2486 void DPagingRequest::UnmapPages(TBool aIMBRequired) |
|
2487 { |
|
2488 __NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread()); |
|
2489 iTempMapping.Unmap(aIMBRequired); |
|
2490 } |
|
2491 |
|
2492 // |
|
2493 // DPoolPagingRequest |
|
2494 // |
|
2495 |
|
2496 DPoolPagingRequest::DPoolPagingRequest(DPagingRequestPool::TGroup& aPoolGroup) : |
|
2497 iPoolGroup(aPoolGroup) |
|
2498 { |
|
2499 } |
|
2500 |
|
2501 |
|
2502 void DPoolPagingRequest::Release() |
2156 { |
2503 { |
2157 NKern::LockSystem(); |
2504 NKern::LockSystem(); |
2158 SetUse(0,0,0); |
2505 ResetUse(); |
2159 Signal(); |
2506 Signal(); |
2160 } |
2507 } |
2161 |
2508 |
2162 |
2509 |
2163 void DPagingRequest::Wait() |
2510 void DPoolPagingRequest::Wait() |
2164 { |
2511 { |
2165 __ASSERT_SYSTEM_LOCK; |
2512 __ASSERT_SYSTEM_LOCK; |
2166 ++iUsageCount; |
2513 ++iUsageCount; |
2167 TInt r = iMutex->Wait(); |
2514 TInt r = iMutex->Wait(); |
2168 __NK_ASSERT_ALWAYS(r == KErrNone); |
2515 __NK_ASSERT_ALWAYS(r == KErrNone); |
2169 } |
2516 } |
2170 |
2517 |
2171 |
2518 |
2172 void DPagingRequest::Signal() |
2519 void DPoolPagingRequest::Signal() |
2173 { |
2520 { |
2174 __ASSERT_SYSTEM_LOCK; |
2521 __ASSERT_SYSTEM_LOCK; |
2175 iPoolGroup.Signal(this); |
2522 iPoolGroup.Signal(this); |
2176 } |
2523 } |
2177 |
|
2178 |
|
2179 FORCE_INLINE TBool DPagingRequest::IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
|
2180 { |
|
2181 __ASSERT_SYSTEM_LOCK; |
|
2182 DMemoryObject* memory = iUseRegionMemory; |
|
2183 TUint index = iUseRegionIndex; |
|
2184 TUint count = iUseRegionCount; |
|
2185 // note, this comparison would fail if either region includes page number KMaxTUint, |
|
2186 // but it isn't possible to create a memory object which is > KMaxTUint pages... |
|
2187 return (memory == aMemory) && ((index + count) > aIndex) && (index < (aIndex + aCount)); |
|
2188 } |
|
2189 |
|
2190 |
|
2191 TLinAddr DPagingRequest::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages) |
|
2192 { |
|
2193 __NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread()); |
|
2194 return iTempMapping.Map(aPages,aCount,aColour); |
|
2195 } |
|
2196 |
|
2197 |
|
2198 void DPagingRequest::UnmapPages(TBool aIMBRequired) |
|
2199 { |
|
2200 __NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread()); |
|
2201 iTempMapping.Unmap(aIMBRequired); |
|
2202 } |
|
2203 |
|
2204 |
2524 |
2205 // |
2525 // |
2206 // DPageReadRequest |
2526 // DPageReadRequest |
2207 // |
2527 // |
2208 |
2528 |
2209 TInt DPageReadRequest::iAllocNext = 0; |
2529 TInt DPageReadRequest::iAllocNext = 0; |
|
2530 |
|
2531 DPageReadRequest::DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup) : |
|
2532 DPoolPagingRequest(aPoolGroup) |
|
2533 { |
|
2534 // allocate space for mapping pages whilst they're being loaded... |
|
2535 iTempMapping.Alloc(EMaxPages); |
|
2536 } |
2210 |
2537 |
2211 TInt DPageReadRequest::Construct() |
2538 TInt DPageReadRequest::Construct() |
2212 { |
2539 { |
2213 // allocate id and mutex... |
2540 // allocate id and mutex... |
2214 TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1); |
2541 TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1); |
2246 |
2570 |
2247 // |
2571 // |
2248 // DPageWriteRequest |
2572 // DPageWriteRequest |
2249 // |
2573 // |
2250 |
2574 |
2251 TInt DPageWriteRequest::iAllocNext = 0; |
2575 |
2252 |
2576 DPageWriteRequest::DPageWriteRequest() |
2253 TInt DPageWriteRequest::Construct() |
2577 { |
2254 { |
2578 iMutex = ThePageCleaningLock; |
2255 // allocate id and mutex... |
|
2256 TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1); |
|
2257 _LIT(KLitPagingRequest,"PageWriteRequest-"); |
|
2258 TBuf<sizeof("PageWriteRequest-")+10> mutexName(KLitPagingRequest); |
|
2259 mutexName.AppendNum(id); |
|
2260 TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageOut); |
|
2261 if(r!=KErrNone) |
|
2262 return r; |
|
2263 |
|
2264 // allocate space for mapping pages whilst they're being loaded... |
2579 // allocate space for mapping pages whilst they're being loaded... |
2265 iTempMapping.Alloc(EMaxPages); |
2580 iTempMapping.Alloc(KMaxPagesToClean); |
2266 |
2581 } |
2267 return r; |
2582 |
|
2583 |
|
2584 void DPageWriteRequest::Release() |
|
2585 { |
|
2586 NKern::LockSystem(); |
|
2587 ResetUse(); |
|
2588 NKern::UnlockSystem(); |
2268 } |
2589 } |
2269 |
2590 |
2270 |
2591 |
2271 // |
2592 // |
2272 // DPagingRequestPool |
2593 // DPagingRequestPool |
2273 // |
2594 // |
2274 |
2595 |
2275 DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest) |
2596 DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest, TBool aWriteRequest) |
2276 : iPageReadRequests(aNumPageReadRequest), iPageWriteRequests(aNumPageWriteRequest) |
2597 : iPageReadRequests(aNumPageReadRequest) |
2277 { |
2598 { |
2278 TUint i; |
2599 TUint i; |
2279 |
|
2280 for(i=0; i<aNumPageReadRequest; ++i) |
2600 for(i=0; i<aNumPageReadRequest; ++i) |
2281 { |
2601 { |
2282 DPageReadRequest* req = new DPageReadRequest(iPageReadRequests); |
2602 DPageReadRequest* req = new DPageReadRequest(iPageReadRequests); |
2283 __NK_ASSERT_ALWAYS(req); |
2603 __NK_ASSERT_ALWAYS(req); |
2284 TInt r = req->Construct(); |
2604 TInt r = req->Construct(); |
2285 __NK_ASSERT_ALWAYS(r==KErrNone); |
2605 __NK_ASSERT_ALWAYS(r==KErrNone); |
2286 iPageReadRequests.iRequests[i] = req; |
2606 iPageReadRequests.iRequests[i] = req; |
2287 iPageReadRequests.iFreeList.Add(req); |
2607 iPageReadRequests.iFreeList.Add(req); |
2288 } |
2608 } |
2289 |
2609 |
2290 for(i=0; i<aNumPageWriteRequest; ++i) |
2610 if (aWriteRequest) |
2291 { |
2611 { |
2292 DPageWriteRequest* req = new DPageWriteRequest(iPageWriteRequests); |
2612 iPageWriteRequest = new DPageWriteRequest(); |
2293 __NK_ASSERT_ALWAYS(req); |
2613 __NK_ASSERT_ALWAYS(iPageWriteRequest); |
2294 TInt r = req->Construct(); |
|
2295 __NK_ASSERT_ALWAYS(r==KErrNone); |
|
2296 iPageWriteRequests.iRequests[i] = req; |
|
2297 iPageWriteRequests.iFreeList.Add(req); |
|
2298 } |
2614 } |
2299 } |
2615 } |
2300 |
2616 |
2301 |
2617 |
2302 DPagingRequestPool::~DPagingRequestPool() |
2618 DPagingRequestPool::~DPagingRequestPool() |
2307 |
2623 |
2308 DPageReadRequest* DPagingRequestPool::AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2624 DPageReadRequest* DPagingRequestPool::AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2309 { |
2625 { |
2310 NKern::LockSystem(); |
2626 NKern::LockSystem(); |
2311 |
2627 |
2312 DPagingRequest* req; |
2628 DPoolPagingRequest* req; |
2313 |
2629 |
2314 // if we collide with page write operation... |
2630 // check for collision with existing write |
2315 req = iPageWriteRequests.FindCollision(aMemory,aIndex,aCount); |
2631 if(iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount)) |
2316 if(req) |
2632 { |
2317 { |
2633 NKern::UnlockSystem(); |
2318 // wait until write completes... |
2634 PageCleaningLock::Lock(); |
2319 req->Wait(); |
2635 PageCleaningLock::Unlock(); |
2320 req->Signal(); |
|
2321 return 0; // caller expected to retry if needed |
2636 return 0; // caller expected to retry if needed |
2322 } |
2637 } |
2323 |
2638 |
2324 // get a request object to use... |
2639 // get a request object to use... |
2325 req = iPageReadRequests.GetRequest(aMemory,aIndex,aCount); |
2640 req = iPageReadRequests.GetRequest(aMemory,aIndex,aCount); |
2326 |
2641 |
2327 // check no new requests collide with us... |
2642 // check no new read or write requests collide with us... |
2328 if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount) |
2643 if ((iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount)) || |
2329 || iPageReadRequests.FindCollision(aMemory,aIndex,aCount)) |
2644 iPageReadRequests.FindCollisionContiguous(aMemory,aIndex,aCount)) |
2330 { |
2645 { |
2331 // another operation is colliding with this region, give up and retry... |
2646 // another operation is colliding with this region, give up and retry... |
2332 req->Signal(); |
2647 req->Signal(); |
2333 return 0; // caller expected to retry if needed |
2648 return 0; // caller expected to retry if needed |
2334 } |
2649 } |
2335 |
2650 |
2336 // we have a request object which we can use... |
2651 // we have a request object which we can use... |
2337 req->SetUse(aMemory,aIndex,aCount); |
2652 req->SetUseContiguous(aMemory,aIndex,aCount); |
2338 |
2653 |
2339 NKern::UnlockSystem(); |
2654 NKern::UnlockSystem(); |
2340 return (DPageReadRequest*)req; |
2655 return (DPageReadRequest*)req; |
2341 } |
2656 } |
2342 |
2657 |
2343 |
2658 |
2344 DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2659 DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject** aMemory, TUint* aIndex, TUint aCount) |
2345 { |
2660 { |
|
2661 __NK_ASSERT_DEBUG(iPageWriteRequest); |
|
2662 __NK_ASSERT_DEBUG(PageCleaningLock::IsHeld()); |
|
2663 |
2346 NKern::LockSystem(); |
2664 NKern::LockSystem(); |
2347 |
2665 |
2348 DPagingRequest* req; |
2666 // Collision with existing read requests is not possible here. For a page to be read it must |
2349 |
2667 // not be present, and for it to be written it must be present and dirty. There is no way for a |
2350 for(;;) |
2668 // page to go between these states without an intervening read on an uninitialised (freshly |
2351 { |
2669 // committed) page, which will wait on the first read request. In other words something like |
2352 // get a request object to use... |
2670 // this: |
2353 req = iPageWriteRequests.GetRequest(aMemory,aIndex,aCount); |
2671 // |
2354 |
2672 // read (blocks), decommit, re-commit, read (waits on mutex), write (now no pending reads!) |
2355 if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount)) |
2673 // |
2356 { |
2674 // Note that a read request can be outstanding and appear to collide with this write, but only |
2357 // another write operation is colliding with this region, give up and retry... |
2675 // in the case when the thread making the read has blocked just after acquiring the request but |
2358 req->Signal(); |
2676 // before it checks whether the read is still necessary. This makes it difficult to assert |
2359 // Reacquire the system lock as Signal() above will release it. |
2677 // that no collisions take place. |
2360 NKern::LockSystem(); |
2678 |
2361 continue; |
2679 iPageWriteRequest->SetUseDiscontiguous(aMemory,aIndex,aCount); |
2362 } |
|
2363 |
|
2364 break; |
|
2365 } |
|
2366 |
|
2367 // we have a request object which we can use... |
|
2368 req->SetUse(aMemory,aIndex,aCount); |
|
2369 |
|
2370 NKern::UnlockSystem(); |
2680 NKern::UnlockSystem(); |
2371 return (DPageWriteRequest*)req; |
2681 |
|
2682 return iPageWriteRequest; |
2372 } |
2683 } |
2373 |
2684 |
2374 |
2685 |
2375 DPagingRequestPool::TGroup::TGroup(TUint aNumRequests) |
2686 DPagingRequestPool::TGroup::TGroup(TUint aNumRequests) |
2376 { |
2687 { |
2377 iNumRequests = aNumRequests; |
2688 iNumRequests = aNumRequests; |
2378 iRequests = new DPagingRequest*[aNumRequests]; |
2689 iRequests = new DPoolPagingRequest*[aNumRequests]; |
2379 __NK_ASSERT_ALWAYS(iRequests); |
2690 __NK_ASSERT_ALWAYS(iRequests); |
2380 } |
2691 } |
2381 |
2692 |
2382 |
2693 |
2383 DPagingRequest* DPagingRequestPool::TGroup::FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2694 DPoolPagingRequest* DPagingRequestPool::TGroup::FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2384 { |
2695 { |
2385 __ASSERT_SYSTEM_LOCK; |
2696 __ASSERT_SYSTEM_LOCK; |
2386 DPagingRequest** ptr = iRequests; |
2697 DPoolPagingRequest** ptr = iRequests; |
2387 DPagingRequest** ptrEnd = ptr+iNumRequests; |
2698 DPoolPagingRequest** ptrEnd = ptr+iNumRequests; |
2388 while(ptr<ptrEnd) |
2699 while(ptr<ptrEnd) |
2389 { |
2700 { |
2390 DPagingRequest* req = *ptr++; |
2701 DPoolPagingRequest* req = *ptr++; |
2391 if(req->IsCollision(aMemory,aIndex,aCount)) |
2702 if(req->IsCollisionContiguous(aMemory,aIndex,aCount)) |
2392 return req; |
2703 return req; |
2393 } |
2704 } |
2394 return 0; |
2705 return 0; |
2395 } |
2706 } |
2396 |
2707 |
2397 |
2708 |
2398 static TUint32 RandomSeed = 33333; |
2709 static TUint32 RandomSeed = 33333; |
2399 |
2710 |
2400 DPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2711 DPoolPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount) |
2401 { |
2712 { |
2402 __NK_ASSERT_DEBUG(iNumRequests > 0); |
2713 __NK_ASSERT_DEBUG(iNumRequests > 0); |
2403 |
2714 |
2404 // try using an existing request which collides with this region... |
2715 // try using an existing request which collides with this region... |
2405 DPagingRequest* req = FindCollision(aMemory,aIndex,aCount); |
2716 DPoolPagingRequest* req = FindCollisionContiguous(aMemory,aIndex,aCount); |
2406 if(!req) |
2717 if(!req) |
2407 { |
2718 { |
2408 // use a free request... |
2719 // use a free request... |
2409 req = (DPagingRequest*)iFreeList.GetFirst(); |
2720 req = (DPoolPagingRequest*)iFreeList.GetFirst(); |
2410 if(req) |
2721 if(req) |
2411 { |
2722 { |
2412 // free requests aren't being used... |
2723 // free requests aren't being used... |
2413 __NK_ASSERT_DEBUG(req->iUsageCount == 0); |
2724 __NK_ASSERT_DEBUG(req->iUsageCount == 0); |
2414 } |
2725 } |