kernel/eka/memmodel/epoc/flexible/mmu/mdatapaging.cpp
branch RCL_3
changeset 28 5b5d147c7838
parent 26 c734af59ce98
child 42 a179b74831c9
26:c734af59ce98 28:5b5d147c7838
    12 //
    12 //
    13 // Description:
    13 // Description:
    14 //
    14 //
    15 
    15 
    16 #include <plat_priv.h>
    16 #include <plat_priv.h>
       
    17 #include <kernel/cache.h>
    17 #include "mm.h"
    18 #include "mm.h"
    18 #include "mmu.h"
    19 #include "mmu.h"
    19 
    20 
    20 #include "mmanager.h"
    21 #include "mmanager.h"
    21 #include "mobject.h"
    22 #include "mobject.h"
    23 #include "mpager.h"
    24 #include "mpager.h"
    24 #include "mswap.h"
    25 #include "mswap.h"
    25 
    26 
    26 
    27 
    27 /**
    28 /**
       
    29 Log2 of minimum number of pages to attempt to write at a time.
       
    30 
       
    31 The value of 2 gives a minimum write size of 16KB.
       
    32 */
       
    33 const TUint KMinPreferredWriteShift = 2;
       
    34 
       
    35 /**
       
    36 Log2 of maximum number of pages to attempt to write at a time.
       
    37 
       
    38 The value of 4 gives a maximum write size of 64KB.
       
    39 */
       
    40 const TUint KMaxPreferredWriteShift = 4;
       
    41 
       
    42 __ASSERT_COMPILE((1 << KMaxPreferredWriteShift) <= KMaxPagesToClean);
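A minimal worked sketch of the sizes these constants imply, assuming the usual 4KB page size (KPageShift == 12); the figures below are illustration only and restate the comments above:

	// pages per write = 1 << shift, bytes = pages << KPageShift
	// KMinPreferredWriteShift (2) ->  4 pages -> 16KB
	// KMaxPreferredWriteShift (4) -> 16 pages -> 64KB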
       
    43 
       
    44 
       
    45 /**
       
    46 Whether the CPU has the page colouring restriction, where pages must be mapped in sequential colour
       
    47 order.
       
    48 */
       
    49 #ifdef __CPU_CACHE_HAS_COLOUR	
       
    50 const TBool KPageColouringRestriction = ETrue;
       
    51 #else
       
    52 const TBool KPageColouringRestriction = EFalse;
       
    53 #endif
       
    54 
       
    55 
       
    56 /**
    28 Manages the swap via the data paging device.
    57 Manages the swap via the data paging device.
    29 */
    58 */
    30 class DSwapManager
    59 class DSwapManager
    31 	{
    60 	{
    32 public:
    61 public:
    55 
    84 
    56 	TInt ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
    85 	TInt ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
    57 	TInt UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
    86 	TInt UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
    58 	TBool IsReserved(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
    87 	TBool IsReserved(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
    59 
    88 
    60 	TInt ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageReadRequest* aRequest, TPhysAddr* aPhysAddrs);
    89 	TInt ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, TPhysAddr* aPhysAddrs);
    61 	TInt WriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TBool aBackground);
    90 	TInt WriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TPhysAddr* aPhysAddrs, TBool aBackground);
    62 
    91 
    63 	void GetSwapInfo(SVMSwapInfo& aInfoOut);
    92 	void GetSwapInfo(SVMSwapInfo& aInfoOut);
    64 	TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);
    93 	TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);
       
    94 	void SetSwapAlign(TUint aSwapAlign);
    65 
    95 
    66 private:
    96 private:
    67 	inline TSwapState SwapState(TUint aSwapData);
    97 	inline TSwapState SwapState(TUint aSwapData);
    68 	inline TInt SwapIndex(TUint aSwapData);
    98 	inline TInt SwapIndex(TUint aSwapData);
    69 	inline TUint SwapData(TSwapState aSwapState, TInt aSwapIndex);
    99 	inline TUint SwapData(TSwapState aSwapState, TInt aSwapIndex);
    70 	
   100 	
    71 	TInt AllocSwapIndex(TInt aCount);
   101 	TInt AllocSwapIndex(TUint aCount);
    72 	void FreeSwapIndex(TInt aSwapIndex);
   102 	void FreeSwapIndex(TInt aSwapIndex);
    73 	void CheckSwapThresholdsAndUnlock(TUint aInitial);
   103 	void CheckSwapThresholdsAndUnlock(TUint aInitial);
    74 	
   104 	
    75 	void DoDeleteNotify(TUint aSwapIndex);
   105 	void DoDeleteNotify(TUint aSwapIndex);
    76 	TInt DoWriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TInt aSwapIndex, TBool aBackground);
   106 	TInt DoWriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TInt aPageIndex, TPhysAddr* aPhysAddrs, TInt aSwapIndex, TBool aBackground);
    77 	
   107 	
    78 private:
   108 private:
    79 	DPagingDevice* iDevice;			///< Paging device used to read and write swap pages
   109 	DPagingDevice* iDevice;			///< Paging device used to read and write swap pages
    80 	
   110 	
    81 	NFastMutex iSwapLock;			///< Fast mutex protecting access to all members below
   111 	NFastMutex iSwapLock;			///< Fast mutex protecting access to all members below
    82 	TUint iFreePageCount;			///< Number of swap pages that have not been reserved
   112 	TUint iFreePageCount;			///< Number of swap pages that have not been reserved
    83 	TBitMapAllocator* iBitMap;		///< Bitmap of swap pages that have been allocated
   113 	TBitMapAllocator* iBitMap;		///< Bitmap of swap pages that have been allocated
       
   114 	TUint iSwapAlign;				///< Log2 number of pages to align swap writes to
    84 	TUint iAllocOffset;				///< Next offset to try when allocating a swap page
   115 	TUint iAllocOffset;				///< Next offset to try when allocating a swap page
    85  	TUint iSwapThesholdLow;
   116  	TUint iSwapThesholdLow;
    86  	TUint iSwapThesholdGood;
   117  	TUint iSwapThesholdGood;
    87 	};
   118 	};
    88 
   119 
    89 
   120 
    90 /**
   121 /**
    91 Manager for demand paged memory objects which contain writeable data.
   122    Manager for demand paged memory objects which contain writeable data.
    92 The contents of the memory are written to a backing store whenever its
   123    The contents of the memory are written to a backing store whenever its
    93 pages are 'paged out'.
   124    pages are 'paged out'.
    94 
   125 
    95 @see DSwapManager
   126    @see DSwapManager
    96 */
   127 */
    97 class DDataPagedMemoryManager : public DPagedMemoryManager
   128 class DDataPagedMemoryManager : public DPagedMemoryManager
    98 	{
   129 	{
    99 private:
   130 private:
   100 	// from DMemoryManager...
   131 	// from DMemoryManager...
   112 	virtual TBool IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
   143 	virtual TBool IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
   113 
   144 
   114 public:
   145 public:
   115 	void GetSwapInfo(SVMSwapInfo& aInfoOut);
   146 	void GetSwapInfo(SVMSwapInfo& aInfoOut);
   116 	TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);
   147 	TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);
       
   148 	TBool PhysicalAccessSupported();
       
   149 	TBool UsePhysicalAccess();
       
   150 	void SetUsePhysicalAccess(TBool aUsePhysicalAccess);
       
   151 	TUint PreferredWriteSize();
       
   152 	TUint PreferredSwapAlignment();
       
   153 	TInt SetWriteSize(TUint aWriteShift);
   117 
   154 
   118 private:
   155 private:
   119 	TInt WritePages(DMemoryObject** aMemory, TUint* aIndex, TPhysAddr* aPages, TUint aCount, DPageWriteRequest *aRequest, TBool aAnyExecutable, TBool aBackground);
   156 	TInt WritePages(DMemoryObject** aMemory, TUint* aIndex, TPhysAddr* aPages, TUint aCount, DPageWriteRequest *aRequest, TBool aAnyExecutable, TBool aBackground);
   120 
   157 
   121 private:
   158 private:
   122 	/**
   159 	/**
   123 	The paging device used for accessing the backing store.
   160 	   The paging device used for accessing the backing store.
   124 	This is set by #InstallPagingDevice.
   161 	   This is set by #InstallPagingDevice.
   125 	*/
   162 	*/
   126 	DPagingDevice* iDevice;
   163 	DPagingDevice* iDevice;
   127 
   164 
   128 	/**
   165 	/**
   129 	The instance of #DSwapManager being used by this manager.
   166 	   The instance of #DSwapManager being used by this manager.
   130 	*/
   167 	*/
   131 	DSwapManager* iSwapManager;
   168 	DSwapManager* iSwapManager;
   132 
   169 
       
   170 	/**
       
   171 	   Whether to read and write pages by physical address without mapping them first.
       
   172 
       
   173 	   Set if the paging media driver supports it.
       
   174 	*/
       
   175 	TBool iUsePhysicalAccess;
       
   176 
   133 public:
   177 public:
   134 	/**
   178 	/**
   135 	The single instance of this manager class.
   179 	   The single instance of this manager class.
   136 	*/
   180 	*/
   137 	static DDataPagedMemoryManager TheManager;
   181 	static DDataPagedMemoryManager TheManager;
   138 	};
   182 	};
   139 
   183 
   140 
   184 
   141 DDataPagedMemoryManager DDataPagedMemoryManager::TheManager;
   185 DDataPagedMemoryManager DDataPagedMemoryManager::TheManager;
   142 DPagedMemoryManager* TheDataPagedMemoryManager = &DDataPagedMemoryManager::TheManager;
   186 DPagedMemoryManager* TheDataPagedMemoryManager = &DDataPagedMemoryManager::TheManager;
   143 
   187 
   144 
   188 
   145 /**
   189 /**
   146 Create a swap manager.
   190    Create a swap manager.
   147 
   191 
   148 @param	aDevice	The demand paging device for access to the swap.
   192    @param	aDevice	The demand paging device for access to the swap.
   149 */
   193 */
   150 TInt DSwapManager::Create(DPagingDevice* aDevice)
   194 TInt DSwapManager::Create(DPagingDevice* aDevice)
   151 	{
   195 	{
   152 	__ASSERT_COMPILE(!(ESwapIndexMask & ESwapStateMask));
   196 	__ASSERT_COMPILE(!(ESwapIndexMask & ESwapStateMask));
   153 	__NK_ASSERT_DEBUG(iDevice == NULL);
   197 	__NK_ASSERT_DEBUG(iDevice == NULL);
   173 	iAllocOffset = 0;
   217 	iAllocOffset = 0;
   174 	return KErrNone;
   218 	return KErrNone;
   175 	}
   219 	}
   176 
   220 
   177 
   221 
       
   222 void DSwapManager::SetSwapAlign(TUint aSwapAlign)
       
   223 	{
       
   224 	TRACE(("WDP: Set swap alignment to %d (%d KB)", aSwapAlign, 4 << aSwapAlign));
       
   225 	NKern::FMWait(&iSwapLock);
       
   226 	iSwapAlign = aSwapAlign;
       
   227 	NKern::FMSignal(&iSwapLock);
       
   228 	}
       
   229 
       
   230 
   178 inline DSwapManager::TSwapState DSwapManager::SwapState(TUint aSwapData)
   231 inline DSwapManager::TSwapState DSwapManager::SwapState(TUint aSwapData)
   179 	{
   232 	{
   180 	TSwapState state = (TSwapState)(aSwapData & ESwapStateMask);
   233 	TSwapState state = (TSwapState)(aSwapData & ESwapStateMask);
   181 	__NK_ASSERT_DEBUG(state >= EStateWritten || (aSwapData & ~ESwapStateMask) == 0);
   234 	__NK_ASSERT_DEBUG(state >= EStateWritten || (aSwapData & ~ESwapStateMask) == 0);
   182 	return state;
   235 	return state;
   194 	return (aSwapIndex << ESwapIndexShift) | aSwapState;
   247 	return (aSwapIndex << ESwapIndexShift) | aSwapState;
   195 	}
   248 	}
   196 
   249 
   197 
   250 
   198 /**
   251 /**
   199 Allocate one or more page's worth of space within the swap area.
   252    Allocate one or more page's worth of space within the swap area.
   200 
   253 
   201 The location is represented by a page-based index into the swap area.
   254    The location is represented by a page-based index into the swap area.
   202 
   255 
   203 @param aCount The number of page's worth of space to allocate.
   256    @param aCount The number of page's worth of space to allocate.
   204 
   257 
   205 @return The swap index of the first location allocated.
   258    @return The swap index of the first location allocated.
   206 */
   259 */
   207 TInt DSwapManager::AllocSwapIndex(TInt aCount)
   260 TInt DSwapManager::AllocSwapIndex(TUint aCount)
   208 	{
   261 	{
   209 	__NK_ASSERT_DEBUG(aCount > 0 && aCount <= KMaxPagesToClean);
   262 	TRACE2(("DSwapManager::AllocSwapIndex %d", aCount));
       
   263 		
       
   264 	__NK_ASSERT_DEBUG(aCount <= KMaxPagesToClean);
   210 	NKern::FMWait(&iSwapLock);
   265 	NKern::FMWait(&iSwapLock);
   211 
   266 
   212 	// search for run of aCount from iAllocOffset to end
   267 	TInt carry;
   213 	TInt carry = 0;
       
   214 	TInt l = KMaxTInt;
   268 	TInt l = KMaxTInt;
   215 	TInt swapIndex = iBitMap->AllocAligned(aCount, 0, 0, EFalse, carry, l, iAllocOffset);
   269 	TInt swapIndex = -1;
   216 
   270 
   217 	// if search failed, retry from beginning
   271 	// if size suitable for alignment, search for aligned run of aCount from iAllocOffset to end,
       
   272 	// then from beginning
       
   273 	// 
       
    274 	// note that this aligns writes that are at least as large as the alignment size - an alternative
       
   275 	// policy might be to align writes that are an exact multiple of the alignment size
       
   276 	if (iSwapAlign && aCount >= (1u << iSwapAlign))
       
   277 		{
       
   278 		carry = 0;
       
   279 		swapIndex = iBitMap->AllocAligned(aCount, iSwapAlign, 0, EFalse, carry, l, iAllocOffset);
       
   280 		if (swapIndex < 0)
       
   281 			{
       
   282 			carry = 0;
       
   283 			swapIndex = iBitMap->AllocAligned(aCount, iSwapAlign, 0, EFalse, carry, l, 0);
       
   284 			}
       
   285 		}
       
   286 	
       
   287 	// if not doing aligned search, or aligned search failed, retry without alignment
   218 	if (swapIndex < 0)
   288 	if (swapIndex < 0)
   219 		{
   289 		{
   220 		iAllocOffset = 0;
       
   221 		carry = 0;
   290 		carry = 0;
   222 		swapIndex = iBitMap->AllocAligned(aCount, 0, 0, EFalse, carry, l, iAllocOffset);
   291 		swapIndex = iBitMap->AllocAligned(aCount, 0, 0, EFalse, carry, l, iAllocOffset);
       
   292 		if (swapIndex < 0)
       
   293 			{
       
   294 			carry = 0;
       
   295 			swapIndex = iBitMap->AllocAligned(aCount, 0, 0, EFalse, carry, l, 0);
       
   296 			}
   223 		}
   297 		}
   224 
   298 
   225 	// if we found one then mark it as allocated and update iAllocOffset
   299 	// if we found one then mark it as allocated and update iAllocOffset
   226 	if (swapIndex >= 0)
   300 	if (swapIndex >= 0)
   227 		{
   301 		{
   228 		__NK_ASSERT_DEBUG(swapIndex <= (iBitMap->iSize - aCount));
   302 		__NK_ASSERT_DEBUG(swapIndex <= (TInt)(iBitMap->iSize - aCount));
   229 		iBitMap->Alloc(swapIndex, aCount);
   303 		iBitMap->Alloc(swapIndex, aCount);
   230 		iAllocOffset = (swapIndex + aCount) % iBitMap->iSize;
   304 		iAllocOffset = (swapIndex + aCount) % iBitMap->iSize;
   231 		}
   305 		}
   232 	
   306 	
   233 	NKern::FMSignal(&iSwapLock);
   307 	NKern::FMSignal(&iSwapLock);
   234 	__NK_ASSERT_DEBUG(swapIndex >= 0 || aCount > 1); // can't fail to allocate single page
   308 	__NK_ASSERT_DEBUG(swapIndex >= 0 || aCount > 1); // can't fail to allocate single page
       
   309 
       
   310 	TRACE2(("DSwapManager::AllocSwapIndex returns %d", swapIndex));	
   235 	return swapIndex;
   311 	return swapIndex;
   236 	}
   312 	}
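A hedged example of how the allocation policy above behaves, assuming iSwapAlign == 3 (8-page alignment):

	// AllocSwapIndex(16) - tries an 8-page-aligned run first, then falls back to any free run
	// AllocSwapIndex(4)  - skips the aligned search (4 < 1 << 3) and takes any free run
	// AllocSwapIndex(1)  - must succeed; the caller asserts swapIndex >= 0 for single pages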
   237 
   313 
   238 
   314 
   239 /**
   315 /**
   240 Free one page's worth of space within the swap area.
   316    Free one page's worth of space within the swap area.
   241 
   317 
   242 The index must have been previously allocated with AllocSwapIndex().
   318    The index must have been previously allocated with AllocSwapIndex().
   243 */
   319 */
   244 void DSwapManager::FreeSwapIndex(TInt aSwapIndex)
   320 void DSwapManager::FreeSwapIndex(TInt aSwapIndex)
   245 	{
   321 	{
   246 	__NK_ASSERT_DEBUG(aSwapIndex >= 0 && aSwapIndex < iBitMap->iSize);
   322 	__NK_ASSERT_DEBUG(aSwapIndex >= 0 && aSwapIndex < iBitMap->iSize);
   247 	DoDeleteNotify(aSwapIndex);
   323 	DoDeleteNotify(aSwapIndex);
   250 	NKern::FMSignal(&iSwapLock);
   326 	NKern::FMSignal(&iSwapLock);
   251 	}
   327 	}
   252 
   328 
   253 
   329 
   254 /**
   330 /**
   255 Reserve some swap pages for the requested region of the memory object
   331    Reserve some swap pages for the requested region of the memory object
   256 
   332 
   257 @param aMemory		The memory object to reserve pages for.
   333    @param aMemory		The memory object to reserve pages for.
   258 @param aStartIndex	The page index in the memory object of the start of the region.
   334    @param aStartIndex	The page index in the memory object of the start of the region.
   259 @param aPageCount	The number of pages to reserve.
   335    @param aPageCount	The number of pages to reserve.
   260 
   336 
   261 @return KErrNone on success, KErrNoMemory if not enough swap space available.
   337    @return KErrNone on success, KErrNoMemory if not enough swap space available.
   262 @pre aMemory's lock is held.
   338    @pre aMemory's lock is held.
   263 @post aMemory's lock is held.
   339    @post aMemory's lock is held.
   264 */
   340 */
   265 TInt DSwapManager::ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
   341 TInt DSwapManager::ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
   266 	{
   342 	{
   267 	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
   343 	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
   268 
   344 
   291 	return KErrNone;
   367 	return KErrNone;
   292 	}
   368 	}
   293 
   369 
   294 
   370 
   295 /**
   371 /**
   296 Unreserve swap pages for the requested region of the memory object.
   372    Unreserve swap pages for the requested region of the memory object.
   297 
   373 
   298 @param aMemory		The memory object to unreserve pages for.
   374    @param aMemory		The memory object to unreserve pages for.
   299 @param aStartIndex	The page index in the memory object of the start of the region.
   375    @param aStartIndex	The page index in the memory object of the start of the region.
   300 @param aPageCount	The number of pages to unreserve.
   376    @param aPageCount	The number of pages to unreserve.
   301 
   377 
   302 @return The number of pages freed.
   378    @return The number of pages freed.
   303 @pre aMemory's lock is held.
   379    @pre aMemory's lock is held.
   304 @post aMemory's lock is held.
   380    @post aMemory's lock is held.
   305 */
   381 */
   306 TInt DSwapManager::UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
   382 TInt DSwapManager::UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
   307 	{
   383 	{
   308 	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
   384 	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
   309 
   385 
   346 	return freedPages;
   422 	return freedPages;
   347 	}
   423 	}
   348 
   424 
   349 
   425 
   350 /**
   426 /**
   351 Determine whether the specified pages in the memory object have swap reserved for them.
   427    Determine whether the specified pages in the memory object have swap reserved for them.
   352 
   428 
   353 @param aMemory		The memory object that owns the pages.
   429    @param aMemory		The memory object that owns the pages.
   354 @param aStartIndex	The first index of the pages to check.
   430    @param aStartIndex	The first index of the pages to check.
   355 @param aPageCount	The number of pages to check.
   431    @param aPageCount	The number of pages to check.
   356 
   432 
   357 @return ETrue if swap is reserved for all the pages, EFalse otherwise.
   433    @return ETrue if swap is reserved for all the pages, EFalse otherwise.
   358 */
   434 */
   359 TBool DSwapManager::IsReserved(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
   435 TBool DSwapManager::IsReserved(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
   360 	{// MmuLock required to protect manager data.
   436 	{// MmuLock required to protect manager data.
   361 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   437 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   362 	__NK_ASSERT_DEBUG(aStartIndex < aMemory->iSizeInPages);
   438 	__NK_ASSERT_DEBUG(aStartIndex < aMemory->iSizeInPages);
   373 	return ETrue;
   449 	return ETrue;
   374 	}
   450 	}
   375 
   451 
   376 
   452 
   377 /**
   453 /**
   378 Read from the swap the specified pages associated with the memory object.
   454    Read from the swap the specified pages associated with the memory object.
   379 
   455 
   380 @param aMemory 	The memory object to read the pages for
   456    @param aMemory 	The memory object to read the pages for
   381 @param aIndex	The index of the first page within the memory object.
   457    @param aIndex	The index of the first page within the memory object.
   382 @param aCount	The number of pages to read.
   458    @param aCount	The number of pages to read.
   383 @param aLinAddr	The address to copy the pages to.
   459    @param aLinAddr	The address to copy the pages to.
   384 @param aRequest	The request to use for the read.
   460    @param aRequest	The request to use for the read.
   385 @param aPhysAddrs	An array of the physical addresses for each page to read in.
   461    @param aPhysAddrs	An array of the physical addresses for each page to read in.
   386 */
   462 */
   387 TInt DSwapManager::ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageReadRequest* aRequest, TPhysAddr* aPhysAddrs)
   463 TInt DSwapManager::ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, TPhysAddr* aPhysAddrs)
   388 	{
   464 	{
   389 	__ASSERT_CRITICAL;
   465 	__ASSERT_CRITICAL;
   390 	
   466 	
   391 	TInt r = KErrNone;
   467 	TInt r = KErrNone;
   392 	const TUint readUnitShift = iDevice->iReadUnitShift;
   468 	const TUint readUnitShift = iDevice->iReadUnitShift;
   445 	return r;
   521 	return r;
   446 	}
   522 	}
   447 
   523 
   448 
   524 
   449 /**
   525 /**
   450 Write the specified memory object's pages from the RAM into the swap.
   526    Write the specified memory object's pages from the RAM into the swap.
   451 
   527 
    452 @param	aMemory		The memory object which owns the pages.
    528    @param	aMemory		The memory object which owns the pages.
   453 @param	aIndex		The index within the memory object.
   529    @param	aIndex		The index within the memory object.
   454 @param 	aCount		The number of pages to write out.
   530    @param 	aCount		The number of pages to write out.
   455 @param	aLinAddr	The location of the pages to write out.
   531    @param	aLinAddr	The location of the pages to write out.
   456 @param  aBackground Whether this is being called in the background by the page cleaning thread
   532    @param  aBackground Whether this is being called in the background by the page cleaning thread
   457                     as opposed to on demand when a free page is required.
   533    as opposed to on demand when a free page is required.
   458 
   534 
   459 @pre Called with page cleaning lock held
   535    @pre Called with page cleaning lock held
   460 */
   536 */
   461 TInt DSwapManager::WriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TBool aBackground)
   537 TInt DSwapManager::WriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TPhysAddr* aPhysAddrs, TBool aBackground)
   462 	{
   538 	{
       
   539 	TRACE(("DSwapManager::WriteSwapPages %d pages", aCount));
       
   540 	
   463 	__ASSERT_CRITICAL;  // so we can pass the paging device a stack-allocated TThreadMessage
   541 	__ASSERT_CRITICAL;  // so we can pass the paging device a stack-allocated TThreadMessage
   464 	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
   542 	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
   465 
   543 
   466 	START_PAGING_BENCHMARK;
   544 	START_PAGING_BENCHMARK;
   467 	
   545 	
   501 		else
   579 		else
   502 			{
   580 			{
   503 			if (startIndex != -1)
   581 			if (startIndex != -1)
   504 				{
   582 				{
   505 				// write pages from startIndex to i exclusive
   583 				// write pages from startIndex to i exclusive
   506 				TInt count = i - startIndex;
   584 				TUint count = i - startIndex;
   507 				__NK_ASSERT_DEBUG(count > 0 && count <= KMaxPagesToClean);
   585 				__NK_ASSERT_DEBUG(count > 0 && count <= KMaxPagesToClean);
   508 
   586 
   509 				// Get a new swap location for these pages, writing them all together if possible
   587 				// Get a new swap location for these pages, writing them all together if possible
   510 				TInt swapIndex = AllocSwapIndex(count);
   588 				TInt swapIndex = AllocSwapIndex(count);
   511 				if (swapIndex >= 0)
   589 				if (swapIndex >= 0)
   512 					r = DoWriteSwapPages(&aMemory[startIndex], &aIndex[startIndex], count, aLinAddr + (startIndex << KPageShift), swapIndex, aBackground);
   590 					r = DoWriteSwapPages(&aMemory[startIndex], &aIndex[startIndex], count, aLinAddr, startIndex, aPhysAddrs, swapIndex, aBackground);
   513 				else
   591 				else
   514 					{
   592 					{
   515 					// Otherwise, write them individually
   593 					// Otherwise, write them individually
   516 					for (TUint j = startIndex ; j < i ; ++j)
   594 					for (TUint j = startIndex ; j < i ; ++j)
   517 						{
   595 						{
   518 						swapIndex = AllocSwapIndex(1);
   596 						swapIndex = AllocSwapIndex(1);
   519 						__NK_ASSERT_DEBUG(swapIndex >= 0);
   597 						__NK_ASSERT_DEBUG(swapIndex >= 0);
   520 						r = DoWriteSwapPages(&aMemory[j], &aIndex[j], 1, aLinAddr + (j << KPageShift), swapIndex, aBackground);
   598 						r = DoWriteSwapPages(&aMemory[j], &aIndex[j], 1, aLinAddr, j, &aPhysAddrs[j], swapIndex, aBackground);
   521 						if (r != KErrNone)
   599 						if (r != KErrNone)
   522 							break;
   600 							break;
   523 						}
   601 						}
   524 					}
   602 					}
   525 
   603 
   531 	END_PAGING_BENCHMARK_N(EPagingBmWriteDataPage, aCount);
   609 	END_PAGING_BENCHMARK_N(EPagingBmWriteDataPage, aCount);
   532 	
   610 	
   533 	return r;
   611 	return r;
   534 	}
   612 	}
   535 
   613 
   536 TInt DSwapManager::DoWriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TInt aSwapIndex, TBool aBackground)
   614 TInt DSwapManager::DoWriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TInt aPageIndex, TPhysAddr* aPhysAddrs, TInt aSwapIndex, TBool aBackground)
   537 	{	
   615 	{	
       
   616 	TRACE2(("DSwapManager::DoWriteSwapPages %d pages to %d", aCount, aSwapIndex));
   538 		
   617 		
   539 	const TUint readUnitShift = iDevice->iReadUnitShift;
   618 	const TUint readUnitShift = iDevice->iReadUnitShift;
   540 	const TUint writeSize = aCount << (KPageShift - readUnitShift);
   619 	const TUint writeSize = aCount << (KPageShift - readUnitShift);
   541 	const TUint writeOffset = aSwapIndex << (KPageShift - readUnitShift);
   620 	const TUint writeOffset = aSwapIndex << (KPageShift - readUnitShift);
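	// Worked example of the offset arithmetic (illustrative assumption, not from the source):
	// with 512-byte read units (readUnitShift == 9) and 4KB pages, each page spans 8 read
	// units, so a 4-page write at swap index 10 gives writeSize == 32 and writeOffset == 80.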
   542 		
   621 
   543 	TThreadMessage msg;
   622 	TThreadMessage msg;
   544 	START_PAGING_BENCHMARK;
   623 	START_PAGING_BENCHMARK;
   545 	TInt r = iDevice->Write(&msg, aLinAddr, writeOffset, writeSize, aBackground);
   624 	TInt r;
       
   625 	if (aLinAddr == 0)
       
   626 		r = iDevice->WritePhysical(&msg, aPhysAddrs, aCount, writeOffset, aBackground);
       
   627 	else
       
   628 		r = iDevice->Write(&msg, aLinAddr + (aPageIndex << KPageShift), writeOffset, writeSize, aBackground);
       
   629 		
   546 	if (r != KErrNone)
   630 	if (r != KErrNone)
   547 		{
   631 		{
   548 		__KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::WriteSwapPages: error writing media from %08x to %08x + %x: %d", aLinAddr, writeOffset << readUnitShift, writeSize << readUnitShift, r));
   632 		__KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::WriteSwapPages: error writing media from %08x to %08x + %x: %d", aLinAddr, writeOffset << readUnitShift, writeSize << readUnitShift, r));
   549 		}
   633 		}
   550 	__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
   634 	__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
   641 	NKern::FMSignal(&iSwapLock);
   725 	NKern::FMSignal(&iSwapLock);
   642 	return KErrNone;
   726 	return KErrNone;
   643 	}
   727 	}
   644 
   728 
   645 
   729 
       
   730 TBool DDataPagedMemoryManager::PhysicalAccessSupported()
       
   731 	{
       
   732 	return (iDevice->iFlags & DPagingDevice::ESupportsPhysicalAccess) != 0;
       
   733 	}
       
   734 
       
   735 
       
   736 TBool DDataPagedMemoryManager::UsePhysicalAccess()
       
   737 	{
       
   738 	return iUsePhysicalAccess;
       
   739 	}
       
   740 
       
   741 
       
   742 void DDataPagedMemoryManager::SetUsePhysicalAccess(TBool aUsePhysicalAccess)
       
   743 	{
       
   744 	TRACE(("WDP: Use physical access set to %d", aUsePhysicalAccess));
       
   745 	NKern::ThreadEnterCS();
       
   746 	PageCleaningLock::Lock();
       
   747 	iUsePhysicalAccess = aUsePhysicalAccess;
       
   748 	ThePager.SetCleanInSequence(!iUsePhysicalAccess && KPageColouringRestriction);
       
   749 	PageCleaningLock::Unlock();
       
   750 	NKern::ThreadLeaveCS();
       
   751 	}
       
   752 
       
   753 
       
   754 TUint DDataPagedMemoryManager::PreferredWriteSize()
       
   755 	{
       
   756 	return MaxU(iDevice->iPreferredWriteShift, KMinPreferredWriteShift + KPageShift) - KPageShift;
       
   757 	}
       
   758 
       
   759 
       
   760 TUint DDataPagedMemoryManager::PreferredSwapAlignment()
       
   761 	{
       
   762 	return MaxU(iDevice->iPreferredWriteShift, KPageShift) - KPageShift;
       
   763 	}
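A worked example of the two helpers above, under the assumption that iPreferredWriteShift is the log2 of the device's preferred write size in bytes and pages are 4KB (KPageShift == 12):

	// iPreferredWriteShift == 15 (32KB) -> PreferredWriteSize() == 3 (8 pages per write)
	//                                      PreferredSwapAlignment() == 3 (8-page alignment)
	// iPreferredWriteShift == 0 (unset) -> PreferredWriteSize() == 2 (the 16KB minimum)
	//                                      PreferredSwapAlignment() == 0 (no alignment)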
       
   764 
       
   765 
       
   766 TInt DDataPagedMemoryManager::SetWriteSize(TUint aWriteShift)
       
   767 	{
       
   768 	TRACE(("WDP: Set write size to %d (%d KB)", aWriteShift, 4 << aWriteShift));
       
   769 	// Check value is sensible
       
   770 	if (aWriteShift > 31)
       
   771 		return KErrArgument;
       
   772 	if (aWriteShift > KMaxPreferredWriteShift)
       
   773 		{
       
   774 		aWriteShift = KMaxPreferredWriteShift;
       
   775 		TRACE(("WDP: Reduced write size to %d (%d KB)",
       
   776 			   aWriteShift, 4 << aWriteShift));
       
   777 
       
   778 		}
       
   779 	NKern::ThreadEnterCS();
       
   780 	PageCleaningLock::Lock();
       
   781 	ThePager.SetPagesToClean(1 << aWriteShift);
       
   782 	PageCleaningLock::Unlock();
       
   783 	NKern::ThreadLeaveCS();
       
   784 	return KErrNone;
       
   785 	}
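For illustration, a hedged usage sketch via the exported wrapper defined at the end of this file; the clamping follows directly from the code above:

	// SetDataWriteSize(2)  -> ThePager cleans 4 pages (16KB) at a time
	// SetDataWriteSize(5)  -> clamped to KMaxPreferredWriteShift, so 16 pages (64KB) at a time
	// SetDataWriteSize(40) -> KErrArgument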
       
   786 
   646 
   787 
   647 TInt DDataPagedMemoryManager::InstallPagingDevice(DPagingDevice* aDevice)
   788 TInt DDataPagedMemoryManager::InstallPagingDevice(DPagingDevice* aDevice)
   648 	{
   789 	{
   649 	TRACEB(("DDataPagedMemoryManager::InstallPagingDevice(0x%08x)",aDevice));
   790 	TRACEB(("DDataPagedMemoryManager::InstallPagingDevice(0x%08x)",aDevice));
   650 
   791 
   662 		return KErrAlreadyExists;
   803 		return KErrAlreadyExists;
   663 		}
   804 		}
   664 
   805 
    665 	// Now we can determine the size of the swap, so create the swap manager.
    806 	// Now we can determine the size of the swap, so create the swap manager.
   666 	iSwapManager = new DSwapManager;
   807 	iSwapManager = new DSwapManager;
   667 	__NK_ASSERT_ALWAYS(iSwapManager);
   808 	if (!iSwapManager)
   668 
   809 		return KErrNoMemory;
   669 	TInt r = iSwapManager->Create(iDevice);
   810 
       
   811 	// Create swap manager object
       
   812 	TInt r = iSwapManager->Create(aDevice);
   670 	if (r != KErrNone)
   813 	if (r != KErrNone)
   671 		{// Couldn't create the swap manager.
   814 		{// Couldn't create the swap manager.
   672 		delete iSwapManager;
   815 		delete iSwapManager;
   673 		iSwapManager = NULL;
   816 		iSwapManager = NULL;
   674 		NKern::SafeSwap(NULL, (TAny*&)iDevice);
   817 		NKern::SafeSwap(NULL, (TAny*&)iDevice);
   675 		return r;
   818 		return r;
   676 		}
   819 		}
       
   820 
       
   821 	// Enable physical access where supported
       
   822 	SetUsePhysicalAccess(PhysicalAccessSupported());
       
   823 	
       
   824 	// Determine swap alignment and number of pages to clean at once from device's preferred write
       
   825 	// size, if set
       
   826 	TRACE(("WDP: Preferred write shift is %d", iDevice->iPreferredWriteShift));
       
   827 	r = SetWriteSize(PreferredWriteSize());
       
   828 	if (r != KErrNone)
       
   829 		{
       
   830 		delete iSwapManager;
       
   831 		iSwapManager = NULL;
       
   832 		NKern::SafeSwap(NULL, (TAny*&)iDevice);
       
   833 		return r;
       
   834 		}
       
   835 
       
   836 	// Set swap alignment
       
   837 	iSwapManager->SetSwapAlign(PreferredSwapAlignment());
       
   838 	
   677  	NKern::LockedSetClear(K::MemModelAttributes, 0, EMemModelAttrDataPaging);
   839  	NKern::LockedSetClear(K::MemModelAttributes, 0, EMemModelAttrDataPaging);
   678 
   840 
   679 	return r;
   841 	return r;
   680 	}
   842 	}
   681 
   843 
   744 
   906 
   745 TInt DDataPagedMemoryManager::ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest)
   907 TInt DDataPagedMemoryManager::ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest)
   746 	{
   908 	{
   747 	__NK_ASSERT_DEBUG(aRequest->CheckUseContiguous(aMemory,aIndex,aCount));
   909 	__NK_ASSERT_DEBUG(aRequest->CheckUseContiguous(aMemory,aIndex,aCount));
   748 
   910 
       
   911 	// todo: Possible to read using physical addresses here, but not sure it's worth it because it's
       
   912 	// not much saving and we may need to map the page anyway if it's blank to clear it
       
   913 	// 
       
   914 	// todo: Could move clearing pages up to here maybe?
       
   915 
   749 	// Map pages temporarily so that we can copy into them.
   916 	// Map pages temporarily so that we can copy into them.
   750 	const TLinAddr linAddr = aRequest->MapPages(aIndex, aCount, aPages);
   917 	const TLinAddr linAddr = aRequest->MapPages(aIndex, aCount, aPages);
   751 
   918 
   752 	TInt r = iSwapManager->ReadSwapPages(aMemory, aIndex, aCount, linAddr, aRequest, aPages);
   919 	TInt r = iSwapManager->ReadSwapPages(aMemory, aIndex, aCount, linAddr, aPages);
   753 
   920 
    754 	// If the memory object allows executable mappings then an IMB is needed.
    921 	// If the memory object allows executable mappings then an IMB is needed.
   755 	aRequest->UnmapPages(aMemory->IsExecutable());
   922 	aRequest->UnmapPages(aMemory->IsExecutable());
   756 
   923 
   757 	return r;
   924 	return r;
   758 	}
   925 	}
   759 
   926 
   760 
   927 
   761 TInt DDataPagedMemoryManager::WritePages(DMemoryObject** aMemory, TUint* aIndex, TPhysAddr* aPages, TUint aCount, DPageWriteRequest* aRequest, TBool aAnyExecutable, TBool aBackground)
   928 TInt DDataPagedMemoryManager::WritePages(DMemoryObject** aMemory, TUint* aIndex, TPhysAddr* aPages, TUint aCount, DPageWriteRequest* aRequest, TBool aAnyExecutable, TBool aBackground)
   762 	{
   929 	{
   763 	// Map pages temporarily so that we can copy into them.
   930 	// Note: this method used to do an IMB for executable pages (like ReadPages) but it was thought
   764 	const TLinAddr linAddr = aRequest->MapPages(aIndex[0], aCount, aPages);
    931 	// that this was unnecessary and so was removed
   765 
   932 
   766 	TInt r = iSwapManager->WriteSwapPages(aMemory, aIndex, aCount, linAddr, aBackground);
   933 	TLinAddr linAddr = 0;
   767 
   934 
   768 	// The memory object allows executable mappings then need IMB.
   935 	if (iUsePhysicalAccess)
   769 	aRequest->UnmapPages(aAnyExecutable);
   936 		{
       
    937 		// must map pages to perform cache maintenance but can map each page individually
       
   938 		for (TUint i = 0 ; i < aCount ; ++i)
       
   939 			{
       
   940 			TLinAddr addr = aRequest->MapPages(aIndex[i], 1, &aPages[i]);
       
   941 			Cache::SyncMemoryBeforeDmaWrite(addr, KPageSize);
       
   942 			aRequest->UnmapPages(EFalse);
       
   943 			}
       
   944 		}
       
   945 	else
       
   946 		linAddr = aRequest->MapPages(aIndex[0], aCount, aPages);
       
   947 	
       
   948 	TInt r = iSwapManager->WriteSwapPages(aMemory, aIndex, aCount, linAddr, aPages, aBackground);
       
   949 
       
   950 	if (linAddr != 0)
       
   951 		aRequest->UnmapPages(EFalse);
   770 
   952 
   771 	return r;
   953 	return r;
   772 	}
   954 	}
   773 
   955 
   774 
   956 
   775 void DDataPagedMemoryManager::CleanPages(TUint aPageCount, SPageInfo** aPageInfos, TBool aBackground)
   957 void DDataPagedMemoryManager::CleanPages(TUint aPageCount, SPageInfo** aPageInfos, TBool aBackground)
   776 	{
   958 	{
       
   959 	TRACE(("DDataPagedMemoryManager::CleanPages %d", aPageCount));
       
   960 	
   777 	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
   961 	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
   778 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   962 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   779 	__NK_ASSERT_DEBUG(aPageCount <= (TUint)KMaxPagesToClean);
   963 	__NK_ASSERT_DEBUG(aPageCount <= (TUint)KMaxPagesToClean);
   780 	
   964 	
   781 	TUint i;
   965 	TUint i;
   782 	DMemoryObject* memory[KMaxPagesToClean];
   966 	DMemoryObject* memory[KMaxPagesToClean];
   783 	TUint index[KMaxPagesToClean];
   967 	TUint index[KMaxPagesToClean];
   784 	TPhysAddr physAddr[KMaxPagesToClean];
   968 	TPhysAddr physAddr[KMaxPagesToClean];
   785 	TBool anyExecutable = EFalse;
   969 	TBool anyExecutable = EFalse;
   786 	
   970 
   787 	for (i = 0 ; i < aPageCount ; ++i)
   971 	for (i = 0 ; i < aPageCount ; ++i)
   788 		{
   972 		{
   789 		SPageInfo* pi = aPageInfos[i];
   973 		SPageInfo* pi = aPageInfos[i];
   790 
   974 
   791 		__NK_ASSERT_DEBUG(!pi->IsWritable());
   975 		__NK_ASSERT_DEBUG(!pi->IsWritable());
   817 
  1001 
   818 	for (i = 0 ; i < aPageCount ; ++i)
  1002 	for (i = 0 ; i < aPageCount ; ++i)
   819 		{
  1003 		{
   820 		SPageInfo* pi = aPageInfos[i];
  1004 		SPageInfo* pi = aPageInfos[i];
   821 		// check if page is clean...
  1005 		// check if page is clean...
   822 		if(pi->CheckModified(&memory[0]) || pi->IsWritable())
  1006 		if(pi->CheckModified(&memory[0]) ||
       
  1007 		   pi->IsWritable())
   823 			{
  1008 			{
   824 			// someone else modified the page, or it became writable, so mark as not cleaned
  1009 			// someone else modified the page, or it became writable, so mark as not cleaned
   825 			aPageInfos[i] = NULL;
  1010 			aPageInfos[i] = NULL;
   826 			}
  1011 			}
   827 		else
  1012 		else
   864 
  1049 
   865 TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds)
  1050 TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds)
   866 	{
  1051 	{
   867 	return ((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->SetSwapThresholds(aThresholds);
  1052 	return ((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->SetSwapThresholds(aThresholds);
   868 	}
  1053 	}
   869   
  1054 
       
  1055 
       
  1056 TBool GetPhysicalAccessSupported()
       
  1057 	{
       
  1058 	return ((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->PhysicalAccessSupported();
       
  1059 	}
       
  1060 
       
  1061 
       
  1062 TBool GetUsePhysicalAccess()
       
  1063 	{
       
  1064 	return ((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->UsePhysicalAccess();
       
  1065 	}
       
  1066 
       
  1067 
       
  1068 void SetUsePhysicalAccess(TBool aUsePhysicalAccess)
       
  1069 	{
       
  1070 	((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->SetUsePhysicalAccess(aUsePhysicalAccess);
       
  1071 	}
       
  1072 
       
  1073 
       
  1074 TUint GetPreferredDataWriteSize()
       
  1075 	{
       
  1076 	return ((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->PreferredWriteSize();
       
  1077 	}
       
  1078 
       
  1079 
       
  1080 TInt SetDataWriteSize(TUint aWriteShift)
       
  1081 	{
       
  1082 	return	((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->SetWriteSize(aWriteShift);
       
  1083 	}
       
  1084