--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpager.h Thu Dec 17 09:24:54 2009 +0200
@@ -0,0 +1,812 @@
+// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+//
+
+/**
+ @file
+ @internalComponent
+*/
+
+#ifndef MPAGER_H
+#define MPAGER_H
+
+struct SVMCacheInfo;
+class DMemModelThread;
+class DMemoryMappingBase;
+
+class DPager
+ {
+public:
+ DPager();
+ void Init2();
+ void Init3();
+
+ FORCE_INLINE TUint NumberOfFreePages()
+ {
+ return iNumberOfFreePages;
+ }
+
+ FORCE_INLINE TUint NumberOfDirtyPages()
+ {
+ TUint ret;
+ MmuLock::Lock();
+ ret = iNumberOfDirtyPages;
+ MmuLock::Unlock();
+ return ret;
+ }
+
+ FORCE_INLINE void SetWritable(SPageInfo& aPageInfo)
+ {
+ if (!aPageInfo.IsDirty())
+ {// This is the first mapping to write to the page so increase the
+ // dirty page count.
+ aPageInfo.SetWritable();
+ iNumberOfDirtyPages++;
+ }
+ }
+
+ FORCE_INLINE void SetClean(SPageInfo& aPageInfo)
+ {
+ __NK_ASSERT_DEBUG(iNumberOfDirtyPages);
+ __NK_ASSERT_DEBUG(aPageInfo.IsDirty());
+ aPageInfo.SetClean();
+ iNumberOfDirtyPages--;
+ }
+
+ /**
+ Remove RAM pages from the cache and return them to the system's free pool.
+ (Free them.)
+
+ This is called by class Mmu when it requires more free RAM to meet an
+ allocation request.
+
+ @param aNumPages The number of pages to free up.
+ @return True if all pages could be freed, false otherwise
+ @pre RamAlloc mutex held.
+ */
+ TBool GetFreePages(TInt aNumPages);
+
+
+ /**
+	Attempts to rejuvenate or page in the page to the mapping that took the page fault.
+	@param aPc				Address of instruction causing the fault.
+	@param aFaultAddress	Address of memory access which faulted.
+	@param aFaultAsid		The asid of the faulting thread's process.
+	@param aFaultIndex		The index within the memory object at which the fault occurred.
+	@param aAccessPermissions	Bitmask of values from enum TAccessPermissions, which
+							indicates the permissions required by the faulting memory access.
+	@param aMemory			The memory object containing the faulting page.
+	@param aMapping			The mapping that maps the faulting page.
+	@param aMapInstanceCount	The instance count of the mapping when it took the page fault.
+	@param aThread			The thread that took the page fault.
+	@return KErrNone if the page is now accessible, otherwise one of the system wide error codes.
+ */
+ TInt HandlePageFault( TLinAddr aPc, TLinAddr aFaultAddress, TUint aFaultAsid, TUint aFaultIndex,
+ TUint aAccessPermissions, DMemoryObject* aMemory, DMemoryMapping* aMapping,
+ TUint aMapInstanceCount, DThread* aThread, TAny* aExceptionInfo);
+
+
+ /**
+ Fault enumeration
+ */
+ enum TFault
+ {
+ };
+
+ /**
+ Fault the system.
+ */
+ static void Fault(TFault aFault);
+
+ /**
+ Get state of live page list.
+ */
+ void GetLiveListInfo(SVMCacheInfo& aInfo);
+
+ /**
+ Resize the live page list.
+ */
+ TInt ResizeLiveList(TUint aMinimumPageCount, TUint aMaximumPageCount);
+
+ /**
+ Recalculate live list size.
+ */
+ TInt ResizeLiveList();
+
+ /**
+ Flush (unmap) all memory which is demand paged.
+ This reduces the live page list to a minimum.
+ */
+ void FlushAll();
+
+ /**
+ Give pages to paging system for managing.
+ */
+ void DonatePages(TUint aCount, TPhysAddr* aPages);
+
+ /**
+ Reclaim pages from paging system which were previously donated with DonatePages.
+
+ @param aCount Number of pages.
+ @param aPages Array of pages (as stored in an RPageArray).
+
+ @return KErrNone if successful.
+ KErrNoMemory if paging system doesn't have enough spare pages. This will leave some or all of the pages still managed by the pager.
+ KErrNotFound if some of the pages were not actually being managed by the pager.
+ */
+ TInt ReclaimPages(TUint aCount, TPhysAddr* aPages);
+
+ /**
+ Called by class Mmu whenever a page of RAM is freed. The page state will be EUnused.
+ If the page was being used by the pager then this gives it the opportunity to update
+	any internal state. If the pager wishes to retain ownership of the page then it must
+	return the result KErrNone; any other value will cause the page to be returned to the
+	system's free pool.
+ */
+ TInt PageFreed(SPageInfo* aPageInfo);
+
+ //
+ // following public members for use by memory managers...
+ //
+
+ /**
+ Allocate a number of RAM pages to store demand paged content.
+ These pages are obtained from...
+
+ 1. An unused page in the live page list.
+ 2. The systems free pool.
+ 3. The oldest page from the live page list.
+ */
+ TInt PageInAllocPages(TPhysAddr* aPages, TUint aCount, Mmu::TRamAllocFlags aAllocFlags);
+
+ /**
+ Free a number of RAM pages allocated by PageInAllocPages.
+ */
+ void PageInFreePages(TPhysAddr* aPages, TUint aCount);
+
+ /**
+ Called to add a new page to the live list after a fault has occurred.
+
+ @param aPageInfo The page.
+
+ @pre MmuLock held
+ @post MmuLock held (but may have been released by this function)
+ */
+ void PagedIn(SPageInfo* aPageInfo);
+
+ /**
+ @param aPageInfo The page.
+ @param aPinArgs Owner of a replacement page which will be used to substitute for the pinned page.
+
+ @pre MmuLock held
+ @post MmuLock held (but may have been released by this function)
+ */
+ void PagedInPinned(SPageInfo* aPageInfo, TPinArgs& aPinArgs);
+
+ /**
+ @pre MmuLock held
+ @post MmuLock left unchanged.
+ */
+ void PagedInUnneeded(SPageInfo* aPageInfo);
+
+ /**
+ @param aPageInfo The page to unpin.
+ @param aPinArgs The resources used for pinning. The replacement pages allocated
+						to this will be increased for each page which became completely
+						unpinned.
+
+ @pre MmuLock held
+ @post MmuLock held (but may have been released by this function)
+ */
+ void Unpin(SPageInfo* aPageInfo, TPinArgs& aPinArgs);
+
+ /**
+ @param aPageInfo The page to pin. Must be page being demand paged.
+ @param aPinArgs Owner of a replacement page which will be used to substitute for the pinned page.
+
+ @pre MmuLock held
+ @post MmuLock held (but may have been released by this function)
+ */
+ void Pin(SPageInfo* aPageInfo, TPinArgs& aPinArgs);
+
+
+ /**
+ @pre MmuLock held
+ @post MmuLock held (but may have been released by this function)
+ */
+ void RejuvenatePageTable(TPte* aPt);
+
+ /**
+ */
+ TBool ReservePages(TUint aRequiredCount, TUint& aCount);
+
+ /**
+ */
+ void UnreservePages(TUint& aCount);
+
+ /**
+ Enumeration of instrumented paging events which only require the
+ SPageInfo object as an argument.
+ */
+ enum TEventSimple
+ {
+ EEventPageInNew,
+ EEventPageInAgain,
+ EEventPageInUnneeded,
+ EEventPageInFree,
+ EEventPageOut,
+ EEventPageAged,
+ EEventPagePin,
+ EEventPageUnpin,
+ EEventPageDonate,
+ EEventPageReclaim,
+ EEventPageAgedClean,
+ EEventPageAgedDirty,
+ EEventPagePageTableAlloc
+ };
+
+ /**
+ Signal the occurrence of an event of type TEventSimple.
+ */
+ void Event(TEventSimple aEvent, SPageInfo* aPageInfo);
+
+ /**
+ Enumeration of instrumented paging events which require the faulting address
+ and program counter as arguments.
+ */
+ enum TEventWithAddresses
+ {
+ EEventPageInStart,
+ EEventPageRejuvenate
+ };
+
+ /**
+ Signal the occurrence of an event of type TEventWithAddresses.
+ */
+ void Event(TEventWithAddresses aEvent, SPageInfo* aPageInfo, TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions);
+
+ /**
+ Get the pager's event info data.
+ */
+ void GetEventInfo(SVMEventInfo& aInfoOut);
+
+ /**
+ Reset the pager's event info data.
+ */
+ void ResetEventInfo();
+
+ /**
+ Attempt to discard the specified page.
+
+ @param aOldPageInfo The page info of the page to discard.
+ @param aBlockZoneId The ID of the RAM zone not to allocate any required new page into.
+ @param aBlockRest Set to ETrue when we don't want the allocator to search for new pages if the RAM
+ zone with ID==aBlockZoneId is encountered, i.e. a general RAM defrag operation.
+ */
+ TInt DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest);
+
+
+ /**
+ Update any live list links to replace the old page with the new page.
+ This is to be used when a page has been moved.
+
+ @param aOldPageInfo The page info of the page to replace.
+ @param aNewPageInfo The page info of the page to be used instead of the old page.
+ */
+ void ReplacePage(SPageInfo& aOldPageInfo, SPageInfo& aNewPageInfo);
+
+
+ //
+ // following public members for use by TPinArgs...
+ //
+
+ /**
+ */
+ TBool AllocPinReplacementPages(TUint aNumPages);
+
+ /**
+ */
+ void FreePinReplacementPages(TUint aNumPages);
+
+private:
+ /**
+ Add a page to the head of the live page list. I.e. make it the 'youngest' page.
+
+ @pre MmuLock held
+ @post MmuLock left unchanged.
+ */
+ void AddAsYoungestPage(SPageInfo* aPageInfo);
+
+ /**
+ Mark a page as type EUnused and add it to the end of the live page list.
+ I.e. make it the 'oldest' page, so that it is the first page to be reused.
+
+ @pre MmuLock held
+ @post MmuLock left unchanged.
+ */
+ void AddAsFreePage(SPageInfo* aPageInfo);
+
+ /**
+ Remove a page from live page list.
+	Its paged state is set to EUnpaged.
+
+ @pre MmuLock held
+ @post MmuLock left unchanged.
+ */
+ void RemovePage(SPageInfo* aPageInfo);
+
+ /**
+ Remove the oldest page from the live page list and perform #StealPage.
+
+ @pre MmuLock held
+ @post MmuLock left unchanged.
+ */
+ SPageInfo* StealOldestPage();
+
+ /**
+ Steal a page from the memory object (if any) which is using the page.
+ If successful the returned page will be in the EUnknown state and the
+ cache state for the page is indeterminate. This is the same state as
+ if the page had been allocated by Mmu::AllocRam.
+
+ @pre RamAlloc mutex held
+ @pre MmuLock held
+ @post MmuLock held (but may have been released by this function)
+ */
+ TInt StealPage(SPageInfo* aPageInfo);
+
+ /**
+ Restrict the access permissions for a page.
+
+ @param aPageInfo The page.
+ @param aRestriction The restriction type to apply.
+ */
+ TInt RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction);
+
+ /**
+ Get a RAM page from the system's free pool and add it to the live list as a free page.
+
+ @return False if out of memory;
+			true otherwise, though the new free page may already have been used.
+
+ @pre MmuLock held
+ @post MmuLock held (but may have been released by this function)
+ */
+ TBool TryGrowLiveList();
+
+ /**
+ Get a RAM page from the system's free pool.
+
+ @pre RamAllocLock held.
+
+ @return The page or NULL if no page is available.
+ */
+ SPageInfo* GetPageFromSystem(Mmu::TRamAllocFlags aAllocFlags, TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
+
+ /**
+ Put a page back on the system's free pool.
+
+ @pre RamAllocLock held.
+ */
+ void ReturnPageToSystem();
+
+ /**
+ Put a specific page back on the system's free pool.
+
+ @pre RamAllocLock held.
+ */
+ void ReturnPageToSystem(SPageInfo& aPageInfo);
+
+ /**
+ Allocate a RAM page to store demand paged content.
+	This tries to obtain a RAM page from the following places:
+ 1. An unused page in the live page list.
+ 2. The systems free pool.
+ 3. The oldest page from the live page list.
+ */
+ SPageInfo* PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags);
+
+ /**
+ If the number of young pages exceeds that specified by iYoungOldRatio then a
+ single page is made 'old'. Call this after adding a new 'young' page.
+
+ @pre MmuLock held
+ @post MmuLock held (but may have been released by this function)
+ */
+ void BalanceAges();
+
+ /**
+ If HaveTooManyPages() then return them to the system.
+ */
+ void RemoveExcessPages();
+
+ /**
+ @return True if pager has too many pages, false otherwise.
+ */
+ TBool HaveTooManyPages();
+
+ /**
+ @return True if pager has its maximum number of pages, false otherwise.
+ */
+ TBool HaveMaximumPages();
+
+ /**
+ Attempt to rejuvenate a page in which a page fault occurred.
+
+ @param aOsAsid Address space ID in which fault occurred.
+ @param aAddress Address of memory access which faulted.
+ @param aAccessPermissions Bitmask of values from enum TAccessPermissions, which
+ indicates the permissions required by faulting memory access.
+ @param aPc Address of instruction causing the fault. (Used for tracing.)
+ @param aMapping The mapping that maps the page that took the fault.
+	@param aMapInstanceCount The instance count of the mapping when the page fault occurred.
+ @param aThread The thread that took the page fault.
+ @param aExceptionInfo The processor specific exception info.
+
+	@return KErrNone if the page was remapped, KErrAbort if the mapping has been reused or detached,
+ KErrNotFound if it may be possible to page in the page.
+ */
+ TInt TryRejuvenate( TInt aOsAsid, TLinAddr aAddress, TUint aAccessPermissions, TLinAddr aPc,
+ DMemoryMappingBase* aMapping, TUint aMapInstanceCount, DThread* aThread,
+ TAny* aExceptionInfo);
+
+ /**
+ Reserve one page for guaranteed locking use.
+ Increments iReservePageCount if successful.
+
+ @return True if operation was successful.
+ */
+ TBool ReservePage();
+
+ /**
+ Called when a realtime thread takes a paging fault.
+	Checks whether it's OK for the thread to take the fault.
+ @return KErrNone if the paging fault should be further processed
+ */
+ TInt CheckRealtimeThreadFault(DThread* aThread, TAny* aExceptionInfo);
+
+ /**
+ Attempt to find the page table entry and page info for a page in the specified mapping.
+
+ @param aOsAsid The OsAsid of the process that owns the mapping.
+ @param aAddress The linear address of the page.
+ @param aMapping The mapping that maps the linear address.
+ @param aMapInstanceCount The instance count of the mapping.
+ @param[out] aPte Will return a pointer to the page table entry for the page.
+ @param[out] aPageInfo Will return a pointer to the page info for the page.
+
+ @return KErrNone on success, KErrAbort when the mapping is now invalid, KErrNotFound when
+ the page table or page info can't be found.
+ */
+ TInt PteAndInfoFromLinAddr( TInt aOsAsid, TLinAddr aAddress, DMemoryMappingBase* aMapping,
+ TUint aMapInstanceCount, TPte*& aPte, SPageInfo*& aPageInfo);
+#ifdef _DEBUG
+ /**
+ Check consistency of live list.
+ */
+ TBool CheckLists();
+
+ void TraceCounts();
+#endif
+
+private:
+ TUint iMinYoungPages; ///< Minimum number of young pages in live list required for correct functioning.
+ TUint iAbsoluteMinPageCount;///< Absolute minimum number of pages in live to meet algorithm constraints
+private:
+ TUint iMinimumPageCount; /**< Minimum size for the live page list, including locked pages */
+ TUint iMaximumPageCount; /**< Maximum size for the live page list, including locked pages */
+ TUint16 iYoungOldRatio; /**< Ratio of young to old pages in the live page list */
+ SDblQue iYoungList; /**< Head of 'young' page list. */
+ TUint iYoungCount; /**< Number of young pages */
+ SDblQue iOldList; /**< Head of 'old' page list. */
+ TUint iOldCount; /**< Number of old pages */
+#ifdef _USE_OLDEST_LISTS
+ SDblQue iOldestCleanList; /**< Head of 'oldestClean' page list. */
+ TUint iOldestCleanCount; /**< Number of 'oldestClean' pages */
+ SDblQue iOldestDirtyList; /**< Head of 'oldestDirty' page list. */
+ TUint iOldestDirtyCount; /**< Number of 'oldestDirty' pages */
+	TUint16 iOldOldestRatio;	/**< Ratio of old pages to the oldest (clean and dirty) pages in the live page list */
+#endif
+ TUint iNumberOfFreePages;
+ TUint iNumberOfDirtyPages; /**< The total number of dirty pages in the paging cache. Protected by MmuLock */
+ TUint iInitMinimumPageCount;/**< Initial value for iMinimumPageCount */
+ TUint iInitMaximumPageCount;/**< Initial value for iMaximumPageCount */
+ TUint iReservePageCount; /**< Number of pages reserved for locking */
+ TUint iMinimumPageLimit; /**< Minimum size for iMinimumPageCount, not including locked pages.
+ iMinimumPageCount >= iMinimumPageLimit + iReservePageCount */
+ SVMEventInfo iEventInfo;
+
+#ifdef __DEMAND_PAGING_BENCHMARKS__
+public:
+ void RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime);
+ void ResetBenchmarkData(TPagingBenchmark aBm);
+ SPagingBenchmarkInfo iBenchmarkInfo[EMaxPagingBm];
+#endif //__DEMAND_PAGING_BENCHMARKS__
+ };
+
+extern DPager ThePager;
+
+
+#ifdef __DEMAND_PAGING_BENCHMARKS__
+
+#define START_PAGING_BENCHMARK TUint32 _bmStart = NKern::FastCounter()
+#define END_PAGING_BENCHMARK(bm) ThePager.RecordBenchmarkData(bm, _bmStart, NKern::FastCounter())
+
+#else
+
+#define START_PAGING_BENCHMARK
+#define END_PAGING_BENCHMARK(bm)
+#endif // __DEMAND_PAGING_BENCHMARKS__
+
+
+FORCE_INLINE void DPager::Event(TEventSimple aEvent, SPageInfo* aPageInfo)
+ {
+ switch(aEvent)
+ {
+ case EEventPageInNew:
+ TRACEP(("DP: %O PageIn 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
+ #ifdef BTRACE_PAGING
+ BTraceContext12(BTrace::EPaging,BTrace::EPagingPageIn,aPageInfo->PhysAddr(),aPageInfo->Owner(),aPageInfo->Index());
+ #endif
+ ++iEventInfo.iPageInReadCount;
+ break;
+
+ case EEventPageInAgain:
+ TRACEP(("DP: %O PageIn (again) 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
+ #ifdef BTRACE_PAGING
+ BTraceContext4(BTrace::EPaging,BTrace::EPagingMapPage,aPageInfo->PhysAddr());
+ #endif
+ break;
+
+ case EEventPageInUnneeded:
+ TRACEP(("DP: %O PageIn (unneeded) 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
+ #ifdef BTRACE_PAGING
+ BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
+ #endif
+ break;
+
+ case EEventPageInFree:
+ TRACEP(("DP: %O PageInFree 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
+ #ifdef BTRACE_PAGING
+ BTraceContext4(BTrace::EPaging,BTrace::EPagingPageInFree,aPageInfo->PhysAddr());
+ #endif
+ break;
+
+ case EEventPageOut:
+ TRACEP(("DP: %O PageOut 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
+ #ifdef BTRACE_PAGING
+ BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOut,aPageInfo->PhysAddr());
+ #endif
+ break;
+
+ case EEventPageAged:
+ TRACEP(("DP: %O Aged 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
+ #ifdef BTRACE_PAGING_VERBOSE
+ BTraceContext4(BTrace::EPaging,BTrace::EPagingAged,aPageInfo->PhysAddr());
+ #endif
+ break;
+
+ case EEventPagePin:
+ TRACEP(("DP: %O Pin 0x%08x count=%d",TheCurrentThread,aPageInfo->PhysAddr(),aPageInfo->PinCount()));
+ #ifdef BTRACE_PAGING
+ BTraceContext8(BTrace::EPaging,BTrace::EPagingPageLock,aPageInfo->PhysAddr(),aPageInfo->PinCount());
+ #endif
+ break;
+
+ case EEventPageUnpin:
+ TRACEP(("DP: %O Unpin 0x%08x count=%d",TheCurrentThread,aPageInfo->PhysAddr(),aPageInfo->PinCount()));
+ #ifdef BTRACE_PAGING
+ BTraceContext8(BTrace::EPaging,BTrace::EPagingPageUnlock,aPageInfo->PhysAddr(),aPageInfo->PinCount());
+ #endif
+ break;
+
+ case EEventPageDonate:
+ TRACEP(("DP: %O Donate 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
+ #ifdef BTRACE_PAGING
+ BTraceContext12(BTrace::EPaging,BTrace::EPagingDonatePage,aPageInfo->PhysAddr(),aPageInfo->Owner(),aPageInfo->Index());
+ #endif
+ break;
+
+ case EEventPageReclaim:
+ TRACEP(("DP: %O Reclaim 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
+ #ifdef BTRACE_PAGING
+ BTraceContext4(BTrace::EPaging,BTrace::EPagingReclaimPage,aPageInfo->PhysAddr());
+ #endif
+ break;
+
+ case EEventPageAgedClean:
+ TRACEP(("DP: %O AgedClean 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
+ #ifdef BTRACE_PAGING_VERBOSE
+ BTraceContext4(BTrace::EPaging,BTrace::EPagingAgedClean,aPageInfo->PhysAddr());
+ #endif
+ break;
+
+ case EEventPageAgedDirty:
+ TRACEP(("DP: %O AgedDirty 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
+ #ifdef BTRACE_PAGING_VERBOSE
+ BTraceContext4(BTrace::EPaging,BTrace::EPagingAgedDirty,aPageInfo->PhysAddr());
+ #endif
+ break;
+
+ case EEventPagePageTableAlloc:
+ TRACEP(("DP: %O PageTableAlloc 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
+ #ifdef BTRACE_PAGING
+ BTraceContext4(BTrace::EPaging,BTrace::EPagingPageTableAlloc,aPageInfo->PhysAddr());
+ #endif
+ break;
+
+ default:
+ __NK_ASSERT_DEBUG(0);
+ break;
+ }
+ }
+
+
+
+FORCE_INLINE void DPager::Event(TEventWithAddresses aEvent, SPageInfo* aPageInfo, TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions)
+ {
+ switch(aEvent)
+ {
+ case EEventPageInStart:
+ TRACEP(("DP: %O HandlePageFault 0x%08x 0x%08x %d",TheCurrentThread,aFaultAddress,aPc,aAccessPermissions));
+ #ifdef BTRACE_PAGING
+ BTraceContext12(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aPc,aAccessPermissions);
+ #endif
+ ++iEventInfo.iPageFaultCount;
+ break;
+
+ case EEventPageRejuvenate:
+ TRACEP(("DP: %O Rejuvenate 0x%08x 0x%08x 0x%08x %d",TheCurrentThread,aPageInfo->PhysAddr(),aFaultAddress,aPc,aAccessPermissions));
+ #ifdef BTRACE_PAGING
+ BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,aPageInfo->PhysAddr(),aFaultAddress,aPc);
+ #endif
+ ++iEventInfo.iPageFaultCount;
+ break;
+
+ default:
+ __NK_ASSERT_DEBUG(0);
+ break;
+ }
+ }
+
+
+
+/**
+Multiplier for number of request objects in pool per drive that supports paging.
+*/
+const TInt KPagingRequestsPerDevice = 2;
+
+
+class DPagingRequest;
+class DPageReadRequest;
+class DPageWriteRequest;
+
+/**
+A pool of paging requests for use by a single paging device.
+*/
+class DPagingRequestPool : public DBase
+ {
+public:
+ DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest);
+ DPageReadRequest* AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+ DPageWriteRequest* AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+private:
+ ~DPagingRequestPool();
+private:
+ class TGroup
+ {
+ public:
+ TGroup(TUint aNumRequests);
+ DPagingRequest* FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+ DPagingRequest* GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+ void Signal(DPagingRequest* aRequest);
+ public:
+ TUint iNumRequests;
+ DPagingRequest** iRequests;
+ SDblQue iFreeList;
+ };
+ TGroup iPageReadRequests;
+ TGroup iPageWriteRequests;
+
+ friend class DPagingRequest;
+ friend class DPageReadRequest;
+ friend class DPageWriteRequest;
+ };
+
+
+/**
+Resources needed to service a paging request.
+*/
+class DPagingRequest : public SDblQueLink
+ {
+public:
+ DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup);
+ void Release();
+ void Wait();
+ void Signal();
+ void SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+ TBool CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+ TBool IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+ TLinAddr MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages);
+ void UnmapPages(TBool aIMBRequired);
+public:
+ TThreadMessage iMessage; /**< Used by the media driver to queue requests */
+ DMutex* iMutex; /**< A mutex for synchronisation and priority inheritance. */
+ TInt iUsageCount;/**< How many threads are using or waiting for this object. */
+ TLinAddr iBuffer; /**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/
+protected:
+ Mmu::TTempMapping iTempMapping;
+private:
+ DPagingRequestPool::TGroup& iPoolGroup;
+ // used to identify memory request is used for...
+ DMemoryObject* iUseRegionMemory;
+ TUint iUseRegionIndex;
+ TUint iUseRegionCount;
+ };
+
+
+/**
+Resources needed to service a page in request.
+*/
+class DPageReadRequest : public DPagingRequest
+ {
+public:
+ inline DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup)
+ : DPagingRequest(aPoolGroup)
+ {}
+ TInt Construct();
+ enum
+ {
+ EMaxPages = 4
+ };
+ static TUint ReservedPagesRequired();
+private:
+ ~DPageReadRequest(); // can't delete
+public:
+ TLinAddr iBuffer; /**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/
+private:
+ DMemoryObject* iMemory;
+private:
+ static TInt iAllocNext;
+ };
+
+
+FORCE_INLINE TUint DPageReadRequest::ReservedPagesRequired()
+ {
+ return iAllocNext*EMaxPages;
+ }
+
+
+/**
+Resources needed to service a page out request.
+*/
+class DPageWriteRequest : public DPagingRequest
+ {
+public:
+ inline DPageWriteRequest(DPagingRequestPool::TGroup& aPoolGroup)
+ : DPagingRequest(aPoolGroup)
+ {}
+ TInt Construct();
+ enum
+ {
+ EMaxPages = 1
+ };
+private:
+ ~DPageWriteRequest(); // can't delete
+private:
+ static TInt iAllocNext;
+ };
+
+
+#endif