kernel/eka/memmodel/epoc/flexible/mmu/mdatapaging.cpp
branch RCL_3
changeset 26 c734af59ce98
parent 22 2f92ad2dc5db
child 28 5b5d147c7838
comparing 24:41f0cfe18c80 with 26:c734af59ce98

@@ -29,43 +29,63 @@
 */
 class DSwapManager
 	{
 public:
 
-	enum TSwapFlags
-		{
-		EAllocated		= 1 << 0,
-		EUninitialised	= 1 << 1,
-		ESaved			= 1 << 2,
-		ESwapFlagsMask 	= 0x7,
-
-		ESwapIndexShift = 3,
-		ESwapIndexMask = 0xffffffff << ESwapIndexShift,
+	/// The state of swap for a logical page in a memory object.
+	///
+	/// Note that this does not always correspond to the state of the page in RAM - for example a
+	/// page can be dirty in RAM but blank in swap if it has never been written out.
+	enum TSwapState
+		{
+		EStateUnreserved = 0,	///< swap space not yet reserved, or page is being decommitted
+		EStateBlank      = 1,	///< swap page has never been written
+		EStateWritten    = 2,	///< swap page has been written out at least once
+		EStateWriting    = 3	///< swap page is in the process of being written out
 		};
 
+	enum
+		{
+		ESwapIndexShift = 2,
+		ESwapStateMask 	= (1 << ESwapIndexShift) - 1,
+		ESwapIndexMask  = 0xffffffff & ~ESwapStateMask
+		};
+
+public:
 	TInt Create(DPagingDevice* aDevice);
 
 	TInt ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
 	TInt UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
 	TBool IsReserved(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
 
 	TInt ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageReadRequest* aRequest, TPhysAddr* aPhysAddrs);
-	TInt WriteSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageWriteRequest* aRequest);
-	void DoDeleteNotify(TUint aSwapData);
+	TInt WriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TBool aBackground);
 
 	void GetSwapInfo(SVMSwapInfo& aInfoOut);
 	TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);
-	void CheckSwapThresholds(TUint aInitial, TUint aFinal);
-	
-protected:
-	DPagingDevice* iDevice;
-	TBitMapAllocator* iBitMap;
-	TUint iBitMapFree;
-	TUint iAllocOffset;
+
+private:
+	inline TSwapState SwapState(TUint aSwapData);
+	inline TInt SwapIndex(TUint aSwapData);
+	inline TUint SwapData(TSwapState aSwapState, TInt aSwapIndex);
+	
+	TInt AllocSwapIndex(TInt aCount);
+	void FreeSwapIndex(TInt aSwapIndex);
+	void CheckSwapThresholdsAndUnlock(TUint aInitial);
+	
+	void DoDeleteNotify(TUint aSwapIndex);
+	TInt DoWriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TInt aSwapIndex, TBool aBackground);
+	
+private:
+	DPagingDevice* iDevice;			///< Paging device used to read and write swap pages
+	
+	NFastMutex iSwapLock;			///< Fast mutex protecting access to all members below
+	TUint iFreePageCount;			///< Number of swap pages that have not been reserved
+	TBitMapAllocator* iBitMap;		///< Bitmap of swap pages that have been allocated
+	TUint iAllocOffset;				///< Next offset to try when allocating a swap page
 	TUint iSwapThesholdLow;
 	TUint iSwapThesholdGood;
-	TThreadMessage iDelNotifyMsg;
 	};
 
 
 /**
 Manager for demand paged memory objects which contain writeable data.
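
The two enums introduced above work together: the low ESwapIndexShift (2) bits of the per-page word held in PagingManagerData() carry a TSwapState, and the remaining high bits carry the page's index within the swap area. A minimal standalone sketch of that packing (plain C++ for illustration; the constant and function names here are invented, not the kernel's):

	#include <cassert>
	#include <cstdint>

	enum TSwapState { EStateUnreserved, EStateBlank, EStateWritten, EStateWriting };
	const unsigned KSwapIndexShift = 2;                           // mirrors ESwapIndexShift
	const uint32_t KSwapStateMask  = (1u << KSwapIndexShift) - 1; // mirrors ESwapStateMask

	uint32_t PackSwapData(TSwapState aState, uint32_t aIndex)
		{ return (aIndex << KSwapIndexShift) | (uint32_t)aState; }
	TSwapState UnpackState(uint32_t aData)
		{ return (TSwapState)(aData & KSwapStateMask); }
	uint32_t UnpackIndex(uint32_t aData)
		{ return aData >> KSwapIndexShift; }

	int main()
		{
		uint32_t d = PackSwapData(EStateWritten, 1234);
		assert(UnpackState(d) == EStateWritten && UnpackIndex(d) == 1234);
		// Unreserved and blank pages carry no swap index, so the whole word is
		// just the state value - the debug assert in SwapState() relies on this.
		assert(PackSwapData(EStateBlank, 0) == (uint32_t)EStateBlank);
		return 0;
		}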

@@ -79,24 +99,26 @@
 private:
 	// from DMemoryManager...
 	virtual TInt Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
 	virtual void Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
 	virtual TInt Wipe(DMemoryObject* aMemory);
-	virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry);
+	virtual void CleanPages(TUint aPageCount, SPageInfo** aPageInfos, TBool aBackground);
 
 	// Methods inherited from DPagedMemoryManager
 	virtual void Init3();
 	virtual TInt InstallPagingDevice(DPagingDevice* aDevice);
 	virtual TInt AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
-	virtual TInt AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+	virtual TInt AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject** aMemory, TUint* aIndex, TUint aCount);
 	virtual TInt ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest);
-	virtual TInt WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest);
 	virtual TBool IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
 
 public:
 	void GetSwapInfo(SVMSwapInfo& aInfoOut);
 	TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);
+
+private:
+	TInt WritePages(DMemoryObject** aMemory, TUint* aIndex, TPhysAddr* aPages, TUint aCount, DPageWriteRequest *aRequest, TBool aAnyExecutable, TBool aBackground);
 
 private:
 	/**
 	The paging device used for accessing the backing store.
 	This is set by #InstallPagingDevice.

@@ -125,11 +147,11 @@
 
 @param	aDevice	The demand paging device for access to the swap.
 */
 TInt DSwapManager::Create(DPagingDevice* aDevice)
 	{
-	__ASSERT_COMPILE(!(ESwapIndexMask & ESwapFlagsMask));
+	__ASSERT_COMPILE(!(ESwapIndexMask & ESwapStateMask));
 	__NK_ASSERT_DEBUG(iDevice == NULL);
 	iDevice = aDevice;
 
 	// Create the structures required to track the swap usage.
 	TUint swapPages = (iDevice->iSwapSize << iDevice->iReadUnitShift) >> KPageShift;

@@ -145,13 +167,89 @@
 	iBitMap = TBitMapAllocator::New(swapPages, ETrue);
 	if (iBitMap == NULL)
 		{// Not enough RAM to keep track of the swap.
 		return KErrNoMemory;
 		}
-	iBitMapFree = swapPages;
+	iFreePageCount = swapPages;
 	iAllocOffset = 0;
 	return KErrNone;
+	}
+
+
+inline DSwapManager::TSwapState DSwapManager::SwapState(TUint aSwapData)
+	{
+	TSwapState state = (TSwapState)(aSwapData & ESwapStateMask);
+	__NK_ASSERT_DEBUG(state >= EStateWritten || (aSwapData & ~ESwapStateMask) == 0);
+	return state;
+	}
+
+
+inline TInt DSwapManager::SwapIndex(TUint aSwapData)
+	{
+	return aSwapData >> ESwapIndexShift;
+	}
+
+
+inline TUint DSwapManager::SwapData(TSwapState aSwapState, TInt aSwapIndex)
+	{
+	return (aSwapIndex << ESwapIndexShift) | aSwapState;
+	}
+
+
+/**
+Allocate one or more page's worth of space within the swap area.
+
+The location is represented by a page-based index into the swap area.
+
+@param aCount The number of page's worth of space to allocate.
+
+@return The swap index of the first location allocated.
+*/
+TInt DSwapManager::AllocSwapIndex(TInt aCount)
+	{
+	__NK_ASSERT_DEBUG(aCount > 0 && aCount <= KMaxPagesToClean);
+	NKern::FMWait(&iSwapLock);
+
+	// search for run of aCount from iAllocOffset to end
+	TInt carry = 0;
+	TInt l = KMaxTInt;
+	TInt swapIndex = iBitMap->AllocAligned(aCount, 0, 0, EFalse, carry, l, iAllocOffset);
+
+	// if search failed, retry from beginning
+	if (swapIndex < 0)
+		{
+		iAllocOffset = 0;
+		carry = 0;
+		swapIndex = iBitMap->AllocAligned(aCount, 0, 0, EFalse, carry, l, iAllocOffset);
+		}
+
+	// if we found one then mark it as allocated and update iAllocOffset
+	if (swapIndex >= 0)
+		{
+		__NK_ASSERT_DEBUG(swapIndex <= (iBitMap->iSize - aCount));
+		iBitMap->Alloc(swapIndex, aCount);
+		iAllocOffset = (swapIndex + aCount) % iBitMap->iSize;
+		}
+	
+	NKern::FMSignal(&iSwapLock);
+	__NK_ASSERT_DEBUG(swapIndex >= 0 || aCount > 1); // can't fail to allocate single page
+	return swapIndex;
+	}
+
+
+/**
+Free one page's worth of space within the swap area.
+
+The index must have been previously allocated with AllocSwapIndex().
+*/
+void DSwapManager::FreeSwapIndex(TInt aSwapIndex)
+	{
+	__NK_ASSERT_DEBUG(aSwapIndex >= 0 && aSwapIndex < iBitMap->iSize);
+	DoDeleteNotify(aSwapIndex);
+	NKern::FMWait(&iSwapLock);
+	iBitMap->Free(aSwapIndex);
+	NKern::FMSignal(&iSwapLock);
 	}
 
 
 /**
 Reserve some swap pages for the requested region of the memory object
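
The new AllocSwapIndex above is a next-fit search: it looks for a free run of aCount pages from iAllocOffset to the end of the bitmap, retries once from the start, and only then gives up - which it may do only for multi-page requests, since swap space was reserved up front. A standalone sketch of the same two-pass search over a plain bit vector (illustrative only; TBitMapAllocator's real AllocAligned signature differs):

	#include <vector>

	// Next-fit: scan from aOffset, wrap to 0 on failure, mark the run as used
	// and leave aOffset just past it as the hint for the next allocation.
	int AllocRun(std::vector<bool>& aUsed, size_t& aOffset, size_t aCount)
		{
		for (int pass = 0; pass < 2; ++pass)
			{
			size_t run = 0;
			for (size_t i = (pass == 0 ? aOffset : 0); i < aUsed.size(); ++i)
				{
				run = aUsed[i] ? 0 : run + 1;
				if (run == aCount)
					{
					size_t first = i + 1 - aCount;
					for (size_t j = first; j <= i; ++j)
						aUsed[j] = true;
					aOffset = (i + 1) % aUsed.size();
					return (int)first;
					}
				}
			}
		return -1; // as the kernel assert notes, only possible when aCount > 1
		}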

@@ -165,44 +263,33 @@
 @post aMemory's lock is held.
 */
 TInt DSwapManager::ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
 	{
 	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
-	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 
-	const TUint indexEnd = aStartIndex + aPageCount;
-	TUint index = aStartIndex;
-
-#ifdef _DEBUG
-	for (; index < indexEnd; index++)
-		{// This page shouldn't already be in use.
-		MmuLock::Lock();
-		__NK_ASSERT_DEBUG(!(aMemory->PagingManagerData(index) & ESwapFlagsMask));
-		MmuLock::Unlock();
-		}
-#endif
-
-	if (iBitMapFree < aPageCount)
-		{
+	NKern::FMWait(&iSwapLock);
+	TUint initFree = iFreePageCount;
+	if (iFreePageCount < aPageCount)
+		{
+		NKern::FMSignal(&iSwapLock);
 		Kern::AsyncNotifyChanges(EChangesOutOfMemory);
 		return KErrNoMemory;
 		}
-	// Reserve the required swap space and mark each page as allocated and uninitialised.
-	TUint initFree = iBitMapFree;
-	iBitMapFree -= aPageCount;
-	for (index = aStartIndex; index < indexEnd; index++)
+	iFreePageCount -= aPageCount;
+	CheckSwapThresholdsAndUnlock(initFree);
+	
+	// Mark each page as allocated and uninitialised.
+	const TUint indexEnd = aStartIndex + aPageCount;
+	for (TUint index = aStartIndex; index < indexEnd; index++)
 		{
 		// Grab MmuLock to stop manager data being accessed.
 		MmuLock::Lock();
-		TUint swapData = aMemory->PagingManagerData(index);
-		__NK_ASSERT_DEBUG(!(swapData & EAllocated));
-		swapData = EAllocated | EUninitialised;
-		aMemory->SetPagingManagerData(index, swapData);
+		__NK_ASSERT_DEBUG(SwapState(aMemory->PagingManagerData(index)) == EStateUnreserved);
+		aMemory->SetPagingManagerData(index, EStateBlank);
 		MmuLock::Unlock();
 		}
 
-	CheckSwapThresholds(initFree, iBitMapFree);
 	return KErrNone;
 	}
 
 
 /**
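
ReserveSwap now does its accounting under the new iSwapLock fast mutex instead of the heavyweight RamAllocLock: the whole reservation is a counter check and decrement, and concrete swap locations are only chosen later, when pages are first written out. A user-space sketch of the pattern, with std::mutex standing in for NFastMutex (illustration, not kernel code):

	#include <cstdint>
	#include <mutex>

	struct SwapAccounting
		{
		std::mutex iLock;       // stands in for iSwapLock
		uint32_t iFreePageCount;

		explicit SwapAccounting(uint32_t aPages) : iFreePageCount(aPages) {}

		bool Reserve(uint32_t aPageCount)
			{
			std::lock_guard<std::mutex> g(iLock);
			if (iFreePageCount < aPageCount)
				return false;             // caller reports out of memory
			iFreePageCount -= aPageCount; // pages are tagged blank lazily
			return true;
			}

		void Unreserve(uint32_t aPageCount)
			{
			std::lock_guard<std::mutex> g(iLock);
			iFreePageCount += aPageCount;
			}
		};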

@@ -217,44 +304,47 @@
 @post aMemory's lock is held.
 */
 TInt DSwapManager::UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
 	{
 	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
-	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 
-	TUint initFree = iBitMapFree;
 	TUint freedPages = 0;
 	const TUint indexEnd = aStartIndex + aPageCount;
 	for (TUint index = aStartIndex; index < indexEnd; index++)
 		{
 		// Grab MmuLock to stop manager data being accessed.
 		MmuLock::Lock();
 		TUint swapData = aMemory->PagingManagerData(index);
-		TUint swapIndex = swapData >> ESwapIndexShift;
-		TBool notifyDelete = EFalse;
-		if (swapData & EAllocated)
+		TSwapState state = SwapState(swapData);
+		if (state != EStateUnreserved)
 			{
-			if (swapData & ESaved)
-				{
-				notifyDelete = ETrue;
-				iBitMap->Free(swapIndex);
-				}
 			freedPages++;
-			aMemory->SetPagingManagerData(index, 0);
+			aMemory->SetPagingManagerData(index, EStateUnreserved);
 			}
+		MmuLock::Unlock();
+
+		if (state == EStateWritten)
+			FreeSwapIndex(SwapIndex(swapData));
+		else if (state == EStateWriting)
+			{
+			// Wait for cleaning to finish before deallocating swap space
+			PageCleaningLock::Lock();
+			PageCleaningLock::Unlock();
+			
 #ifdef _DEBUG
-		else
-			__NK_ASSERT_DEBUG(swapData == 0);
+			MmuLock::Lock();
+			__NK_ASSERT_DEBUG(SwapState(aMemory->PagingManagerData(index)) == EStateUnreserved);
+			MmuLock::Unlock();
 #endif
-
-		MmuLock::Unlock();
-
-		if (notifyDelete)
-			DoDeleteNotify(swapIndex);
+			}
 		}
-	iBitMapFree += freedPages;
-	CheckSwapThresholds(initFree, iBitMapFree);
+	
+	NKern::FMWait(&iSwapLock);
+	TUint initFree = iFreePageCount;
+	iFreePageCount += freedPages;
+	CheckSwapThresholdsAndUnlock(initFree);
+	
 	return freedPages;
 	}
 
 
 /**
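
When UnreserveSwap finds a page in EStateWriting it cannot free the swap location immediately, because the cleaning thread is still using it. Instead it briefly acquires and releases PageCleaningLock, which the cleaner holds for the whole write-out, turning the lock into a completion barrier. The idiom in miniature (std::mutex as a stand-in; sketch only):

	#include <mutex>

	std::mutex gPageCleaningLock; // held by the cleaner for the entire write-out

	// Acquiring the lock can only succeed once the in-flight clean has
	// finished, so lock-then-unlock waits for completion without needing
	// any per-page signalling.
	void WaitForCleaningToFinish()
		{
		gPageCleaningLock.lock();
		gPageCleaningLock.unlock();
		}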

@@ -273,11 +363,11 @@
 	__NK_ASSERT_DEBUG(aStartIndex + aPageCount <= aMemory->iSizeInPages);
 
 	const TUint indexEnd = aStartIndex + aPageCount;
 	for (TUint index = aStartIndex; index < indexEnd; index++)
 		{
-		if (!(aMemory->PagingManagerData(index) & DSwapManager::EAllocated))
+		if (SwapState(aMemory->PagingManagerData(index)) == EStateUnreserved)
 			{// This page is not allocated by swap manager.
 			return EFalse;
 			}
 		}
 	return ETrue;

@@ -294,57 +384,61 @@
 @param aRequest	The request to use for the read.
 @param aPhysAddrs	An array of the physical addresses for each page to read in.
 */
 TInt DSwapManager::ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageReadRequest* aRequest, TPhysAddr* aPhysAddrs)
 	{
+	__ASSERT_CRITICAL;
+	
 	TInt r = KErrNone;
 	const TUint readUnitShift = iDevice->iReadUnitShift;
 	TUint readSize = KPageSize >> readUnitShift;
-	TThreadMessage* msg = const_cast<TThreadMessage*>(&aRequest->iMessage);
-
-	// Determine the wipe byte values for uninitialised pages.
-	TUint allocFlags = aMemory->RamAllocFlags();
-	TBool wipePages = !(allocFlags & Mmu::EAllocNoWipe);
-	TUint8 wipeByte = (allocFlags & Mmu::EAllocUseCustomWipeByte) ? (allocFlags >> Mmu::EAllocWipeByteShift) & 0xff : 0x03;
+	TThreadMessage message;
 
 	const TUint indexEnd = aIndex + aCount;
 	for (TUint index = aIndex; index < indexEnd; index++, aLinAddr += KPageSize, aPhysAddrs++)
 		{
 		START_PAGING_BENCHMARK;
 
 		MmuLock::Lock();	// MmuLock required for atomic access to manager data.
 		TUint swapData = aMemory->PagingManagerData(index);
-
-		if (!(swapData & EAllocated))
+		TSwapState state = SwapState(swapData);
+
+		if (state == EStateUnreserved)
 			{// This page is not committed to the memory object
 			MmuLock::Unlock();
 			return KErrNotFound;
 			}
-		if (swapData & EUninitialised)
+		else if (state == EStateBlank)
 			{// This page has not been written to yet so don't read from swap 
 			// just wipe it if required.
+			TUint allocFlags = aMemory->RamAllocFlags();
 			MmuLock::Unlock();
+			TBool wipePages = !(allocFlags & Mmu::EAllocNoWipe);
 			if (wipePages)
 				{
+				TUint8 wipeByte = (allocFlags & Mmu::EAllocUseCustomWipeByte) ?
+					(allocFlags >> Mmu::EAllocWipeByteShift) & 0xff :
+					0x03;
 				memset((TAny*)aLinAddr, wipeByte, KPageSize);
 				}
 			}
 		else
 			{
-			__NK_ASSERT_DEBUG(swapData & ESaved);
-			TUint swapIndex = swapData >> ESwapIndexShift;
+			// It is not possible to get here if the page is in state EStateWriting as if so it must
+			// be present in RAM, and so will not need to be read in.
+			__NK_ASSERT_DEBUG(state == EStateWritten);
+			
 			// OK to release as if the object's data is decommitted the pager 
 			// will check that data is still valid before mapping it.
 			MmuLock::Unlock();
-			TUint readStart = (swapIndex << KPageShift) >> readUnitShift;
+			TUint readStart = (SwapIndex(swapData) << KPageShift) >> readUnitShift;
 			START_PAGING_BENCHMARK;
-			r = iDevice->Read(msg, aLinAddr, readStart, readSize, DPagingDevice::EDriveDataPaging);
+			r = iDevice->Read(&message, aLinAddr, readStart, readSize, DPagingDevice::EDriveDataPaging);
 			if (r != KErrNone)
 				__KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::ReadSwapPages: error reading media at %08x + %x: %d", readStart << readUnitShift, readSize << readUnitShift, r));
 			__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
 			END_PAGING_BENCHMARK(EPagingBmReadDataMedia);
-			// TODO: Work out what to do if page in fails, unmap all pages????
 			__NK_ASSERT_ALWAYS(r == KErrNone);
 			}
 		END_PAGING_BENCHMARK(EPagingBmReadDataPage);
 		}
 
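
ReadSwapPages only touches the media for EStateWritten pages; a blank page is satisfied by filling the frame with a wipe byte, whose value can be customised through the allocation flags. A sketch of that decode (the flag layout below is invented for illustration and is not Mmu's actual encoding):

	#include <cstdint>
	#include <cstring>

	const uint32_t KAllocNoWipe        = 1u << 0; // hypothetical stand-in for Mmu::EAllocNoWipe
	const uint32_t KAllocUseCustomWipe = 1u << 1; // ...for Mmu::EAllocUseCustomWipeByte
	const unsigned KAllocWipeByteShift = 8;       // ...for Mmu::EAllocWipeByteShift

	void WipeIfRequired(uint8_t* aPage, size_t aPageSize, uint32_t aAllocFlags)
		{
		if (aAllocFlags & KAllocNoWipe)
			return;
		uint8_t wipeByte = (aAllocFlags & KAllocUseCustomWipe)
			? (uint8_t)(aAllocFlags >> KAllocWipeByteShift)
			: 0x03; // the default wipe value used by the code above
		memset(aPage, wipeByte, aPageSize);
		}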

@@ -357,130 +451,196 @@
 
 @param	aMemory		The memory object who owns the pages.
 @param	aIndex		The index within the memory object.
 @param 	aCount		The number of pages to write out.
 @param	aLinAddr	The location of the pages to write out.
-@param	aRequest	The demand paging request to use.
-
-*/
-TInt DSwapManager::WriteSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageWriteRequest* aRequest)
-	{// The RamAllocLock prevents the object's swap pages being reassigned.
-	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
-
-	// Write the page out to the swap.
+@param  aBackground Whether this is being called in the background by the page cleaning thread
+                    as opposed to on demand when a free page is required.
+
+@pre Called with page cleaning lock held
+*/
+TInt DSwapManager::WriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TBool aBackground)
+	{
+	__ASSERT_CRITICAL;  // so we can pass the paging device a stack-allocated TThreadMessage
+	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
+
+	START_PAGING_BENCHMARK;
+	
+	TUint i;
+	TUint swapData[KMaxPagesToClean + 1];
+	
+	MmuLock::Lock();
+	for (i = 0 ; i < aCount ; ++i)
+		{
+		swapData[i] = aMemory[i]->PagingManagerData(aIndex[i]);
+		TSwapState s = SwapState(swapData[i]);
+		// It's not possible to write a page while it's already being written, because we always hold
+		// the PageCleaning mutex when we clean
+		__NK_ASSERT_DEBUG(s == EStateUnreserved || s == EStateBlank || s == EStateWritten);
+		if (s == EStateBlank || s == EStateWritten)
+			aMemory[i]->SetPagingManagerData(aIndex[i], SwapData(EStateWriting, 0));
+		}
+	MmuLock::Unlock();
+
+	// By the time we get here, some pages may have been decommitted, so write out only those runs
+	// of pages which are still committed.
+
 	TInt r = KErrNone;
+	TInt startIndex = -1;
+	swapData[aCount] = SwapData(EStateUnreserved, 0); // end of list marker
+	for (i = 0 ; i < (aCount + 1) ; ++i)
+		{
+		if (SwapState(swapData[i]) != EStateUnreserved)
+			{
+			if (startIndex == -1)
+				startIndex = i;
+
+			// Free swap page corresponding to old version of the pages we are going to write
+			if (SwapState(swapData[i]) == EStateWritten)
+				FreeSwapIndex(SwapIndex(swapData[i]));
+			}
+		else
+			{
+			if (startIndex != -1)
+				{
+				// write pages from startIndex to i exclusive
+				TInt count = i - startIndex;
+				__NK_ASSERT_DEBUG(count > 0 && count <= KMaxPagesToClean);
+
+				// Get a new swap location for these pages, writing them all together if possible
+				TInt swapIndex = AllocSwapIndex(count);
+				if (swapIndex >= 0)
+					r = DoWriteSwapPages(&aMemory[startIndex], &aIndex[startIndex], count, aLinAddr + (startIndex << KPageShift), swapIndex, aBackground);
+				else
+					{
+					// Otherwise, write them individually
+					for (TUint j = startIndex ; j < i ; ++j)
+						{
+						swapIndex = AllocSwapIndex(1);
+						__NK_ASSERT_DEBUG(swapIndex >= 0);
+						r = DoWriteSwapPages(&aMemory[j], &aIndex[j], 1, aLinAddr + (j << KPageShift), swapIndex, aBackground);
+						if (r != KErrNone)
+							break;
+						}
+					}
+
+				startIndex = -1;
+				}
+			}
+		}
+	
+	END_PAGING_BENCHMARK_N(EPagingBmWriteDataPage, aCount);
+	
+	return r;
+	}
+
+TInt DSwapManager::DoWriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TInt aSwapIndex, TBool aBackground)
+	{
+	
 	const TUint readUnitShift = iDevice->iReadUnitShift;
-	TUint writeSize = KPageSize >> readUnitShift;
-	TThreadMessage* msg = const_cast<TThreadMessage*>(&aRequest->iMessage);
-
-	const TUint indexEnd = aIndex + aCount;
-	for (TUint index = aIndex; index < indexEnd; index++)
-		{
-		START_PAGING_BENCHMARK;
-
-		MmuLock::Lock();
-		TUint swapData = aMemory->PagingManagerData(index);
-		// OK to release as ram alloc lock prevents manager data being updated.
-		MmuLock::Unlock();
-		if (!(swapData & EAllocated))
-			{// This page is being decommited from aMemory so it is clean/unrequired.
-			continue;
-			}
-		TInt swapIndex = swapData >> ESwapIndexShift;
-		if (swapData & ESaved)
-			{// An old version of this page has been saved to swap so free it now
-			// as it will be out of date.
-			iBitMap->Free(swapIndex);
-			DoDeleteNotify(swapIndex);
-			}
-		// Get a new swap location for this page.
-		swapIndex = iBitMap->AllocFrom(iAllocOffset);
-		__NK_ASSERT_DEBUG(swapIndex != -1 && swapIndex < iBitMap->iSize);
-		iAllocOffset = swapIndex + 1;
-		if (iAllocOffset == (TUint)iBitMap->iSize)
-			iAllocOffset = 0;
-
-		TUint writeOffset = (swapIndex << KPageShift) >> readUnitShift;
-		{
-		START_PAGING_BENCHMARK;
-		r = iDevice->Write(msg, aLinAddr, writeOffset, writeSize, EFalse);
-		if (r != KErrNone)
-			__KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::WriteSwapPages: error writing media at %08x + %x: %d", writeOffset << readUnitShift, writeSize << readUnitShift, r));
-		__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
-		END_PAGING_BENCHMARK(EPagingBmWriteDataMedia);
-		}
-		// TODO: Work out what to do if page out fails.
-		__NK_ASSERT_ALWAYS(r == KErrNone);
-		MmuLock::Lock();
-		// The swap data should not have been modified.
-		__NK_ASSERT_DEBUG(swapData == aMemory->PagingManagerData(index));
-		// Store the new swap location and mark the page as saved.
-		swapData &= ~(EUninitialised | ESwapIndexMask);
-		swapData |= (swapIndex << ESwapIndexShift) | ESaved;
-		aMemory->SetPagingManagerData(index, swapData);
-		MmuLock::Unlock();
-
-		END_PAGING_BENCHMARK(EPagingBmWriteDataPage);
-		}
-	
-	return r;
-	}
-
+	const TUint writeSize = aCount << (KPageShift - readUnitShift);
+	const TUint writeOffset = aSwapIndex << (KPageShift - readUnitShift);
+	
+	TThreadMessage msg;
+	START_PAGING_BENCHMARK;
+	TInt r = iDevice->Write(&msg, aLinAddr, writeOffset, writeSize, aBackground);
+	if (r != KErrNone)
+		{
+		__KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::WriteSwapPages: error writing media from %08x to %08x + %x: %d", aLinAddr, writeOffset << readUnitShift, writeSize << readUnitShift, r));
+		}
+	__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
+	__NK_ASSERT_ALWAYS(r == KErrNone);
+	END_PAGING_BENCHMARK(EPagingBmWriteDataMedia);
+
+	TUint i;
+	TUint swapData[KMaxPagesToClean];
+	
+	MmuLock::Lock();
+	for (i = 0 ; i < aCount ; ++i)
+		{
+		// Re-check the swap state in case page was decommitted while we were writing
+		swapData[i] = aMemory[i]->PagingManagerData(aIndex[i]);
+		TSwapState s = SwapState(swapData[i]);
+		__NK_ASSERT_DEBUG(s == EStateUnreserved || s == EStateWriting);
+		if (s == EStateWriting)
+			{
+			// Store the new swap location and mark the page as saved.
+			aMemory[i]->SetPagingManagerData(aIndex[i], SwapData(EStateWritten, aSwapIndex + i));
+			}
+		}
+	MmuLock::Unlock();
+
+	for (i = 0 ; i < aCount ; ++i)
+		{
+		TSwapState s = SwapState(swapData[i]);
+		if (s == EStateUnreserved)
+			{
+			// The page was decommitted while we were cleaning it, so free the swap page we
+			// allocated and continue, leaving this page in the unreserved state.
+			FreeSwapIndex(aSwapIndex + i);
+			}
+		}
+
+	return KErrNone;
+	}
+	
 
 /**
 Notify the media driver that the page written to swap is no longer required.
 */
 void DSwapManager::DoDeleteNotify(TUint aSwapIndex)
 	{
-	// Ram Alloc lock prevents the swap location being assigned to another page.
-	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
-
+	__ASSERT_CRITICAL;  // so we can pass the paging device a stack-allocated TThreadMessage
#ifdef __PAGING_DELETE_NOTIFY_ENABLED
 	const TUint readUnitShift = iDevice->iReadUnitShift;
 	const TUint size = KPageSize >> readUnitShift;
 	TUint offset = (aSwapIndex << KPageShift) >> readUnitShift;
+	TThreadMessage msg;
 
 	START_PAGING_BENCHMARK;
 	// Ignore the return value as this is just an optimisation that is not supported on all media.
-	(void)iDevice->DeleteNotify(&iDelNotifyMsg, offset, size);
+	(void)iDevice->DeleteNotify(&msg, offset, size);
 	END_PAGING_BENCHMARK(EPagingBmDeleteNotifyDataPage);
 #endif
 	}
 
 
 // Check swap thresholds and notify (see K::CheckFreeMemoryLevel)
-void DSwapManager::CheckSwapThresholds(TUint aInitial, TUint aFinal)
+void DSwapManager::CheckSwapThresholdsAndUnlock(TUint aInitial)
 	{
 	TUint changes = 0;
-	if (aFinal < iSwapThesholdLow && aInitial >= iSwapThesholdLow)
+	if (iFreePageCount < iSwapThesholdLow && aInitial >= iSwapThesholdLow)
 		changes |= (EChangesFreeMemory | EChangesLowMemory);
-	if (aFinal >= iSwapThesholdGood && aInitial < iSwapThesholdGood)
+	if (iFreePageCount >= iSwapThesholdGood && aInitial < iSwapThesholdGood)
 		changes |= EChangesFreeMemory;
+	NKern::FMSignal(&iSwapLock);
 	if (changes)
 		Kern::AsyncNotifyChanges(changes);
 	}
 
 
 void DSwapManager::GetSwapInfo(SVMSwapInfo& aInfoOut)
 	{
-	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	aInfoOut.iSwapSize = iBitMap->iSize << KPageShift;
-	aInfoOut.iSwapFree = iBitMapFree << KPageShift;
+	NKern::FMWait(&iSwapLock);
+	aInfoOut.iSwapFree = iFreePageCount << KPageShift;
+	NKern::FMSignal(&iSwapLock);
 	}
 
 
 TInt DSwapManager::SetSwapThresholds(const SVMSwapThresholds& aThresholds)
 	{
-	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 	if (aThresholds.iLowThreshold > aThresholds.iGoodThreshold)
 		return KErrArgument;
 	TInt low = (aThresholds.iLowThreshold + KPageSize - 1) >> KPageShift;
 	TInt good = (aThresholds.iGoodThreshold + KPageSize - 1) >> KPageShift;
 	if (good > iBitMap->iSize)
 		return KErrArgument;
+	NKern::FMWait(&iSwapLock);
 	iSwapThesholdLow = low;
 	iSwapThesholdGood = good;
+	NKern::FMSignal(&iSwapLock);
 	return KErrNone;
 	}
 
 
 
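
The new write path batches up to KMaxPagesToClean pages: it marks them EStateWriting, then walks the batch (with a sentinel entry at the end) and writes each run of still-committed pages with a single media request, falling back to page-at-a-time writes only when no contiguous swap run can be allocated. The run-splitting loop in isolation (WriteRun stands in for DoWriteSwapPages; sketch only):

	#include <cstdio>
	#include <vector>

	void WriteRun(size_t aFirst, size_t aCount)
		{
		printf("write %zu page(s) starting at batch slot %zu\n", aCount, aFirst);
		}

	void WriteCommittedRuns(const std::vector<bool>& aCommitted)
		{
		int start = -1;
		// i == aCommitted.size() acts as the end-of-list sentinel, like
		// swapData[aCount] = SwapData(EStateUnreserved, 0) above.
		for (size_t i = 0; i <= aCommitted.size(); ++i)
			{
			bool committed = i < aCommitted.size() && aCommitted[i];
			if (committed && start < 0)
				start = (int)i;
			else if (!committed && start >= 0)
				{
				WriteRun(start, i - start);
				start = -1;
				}
			}
		}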

@@ -525,11 +685,11 @@
 	aRequest = iDevice->iRequestPool->AcquirePageReadRequest(aMemory,aIndex,aCount);
 	return KErrNone;
 	}
 
 
-TInt DDataPagedMemoryManager::AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+TInt DDataPagedMemoryManager::AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
 	{
 	aRequest = iDevice->iRequestPool->AcquirePageWriteRequest(aMemory,aIndex,aCount);
 	return KErrNone;
 	}
 

@@ -545,50 +705,48 @@
 
 	// re-initialise any decommitted pages which we may still own because they were pinned...
 	ReAllocDecommitted(aMemory,aIndex,aCount);
 
 	// Reserve the swap pages required.
-	RamAllocLock::Lock();
-	TInt r = iSwapManager->ReserveSwap(aMemory, aIndex, aCount);
-	RamAllocLock::Unlock();
-
-	return r;
+	return iSwapManager->ReserveSwap(aMemory, aIndex, aCount);
 	}
 
 
 void DDataPagedMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
 	{
 	TRACE2(("DDataPagedMemoryManager::Free(0x%08x,0x%x,0x%x)", aMemory, aIndex, aCount));
 	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
 
 	// Unreserve the swap pages associated with the memory object.  Do this before
 	// removing the page array entries to prevent a page fault reallocating these pages.
-	RamAllocLock::Lock();
 	TInt freed = iSwapManager->UnreserveSwap(aMemory, aIndex, aCount);
 	(void)freed;
-	RamAllocLock::Unlock();
 
 	DoFree(aMemory,aIndex,aCount);
 	}
 
 
 /**
 @copydoc DMemoryManager::Wipe
-@todo	Not yet implemented.
-		Need to handle this smartly, e.g. throw RAM away and set to uninitialised 
 */
 TInt DDataPagedMemoryManager::Wipe(DMemoryObject* aMemory)
 	{
-	__NK_ASSERT_ALWAYS(0); // not implemented yet
+	// This is not implemented
+	//
+	// It's possible to implement this by throwing away all pages that are paged in and just setting
+	// the backing store state to EStateBlank, however there are currently no use cases which
+	// involve calling Wipe on paged memory.
+	
+	__NK_ASSERT_ALWAYS(0);
 
 	return KErrNotSupported;
 	}
 
 
 TInt DDataPagedMemoryManager::ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest)
 	{
-	__NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));
+	__NK_ASSERT_DEBUG(aRequest->CheckUseContiguous(aMemory,aIndex,aCount));
 
 	// Map pages temporarily so that we can copy into them.
 	const TLinAddr linAddr = aRequest->MapPages(aIndex, aCount, aPages);
 
 	TInt r = iSwapManager->ReadSwapPages(aMemory, aIndex, aCount, linAddr, aRequest, aPages);

@@ -598,74 +756,82 @@
 
 	return r;
 	}
 
 
-TInt DDataPagedMemoryManager::WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest)
+TInt DDataPagedMemoryManager::WritePages(DMemoryObject** aMemory, TUint* aIndex, TPhysAddr* aPages, TUint aCount, DPageWriteRequest *aRequest, TBool aAnyExecutable, TBool aBackground)
 	{
-	__NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));
-
 	// Map pages temporarily so that we can copy into them.
-	const TLinAddr linAddr = aRequest->MapPages(aIndex, aCount, aPages);
+	const TLinAddr linAddr = aRequest->MapPages(aIndex[0], aCount, aPages);
 
-	TInt r = iSwapManager->WriteSwapPages(aMemory, aIndex, aCount, linAddr, aRequest);
+	TInt r = iSwapManager->WriteSwapPages(aMemory, aIndex, aCount, linAddr, aBackground);
 
 	// The memory object allows executable mappings then need IMB.
-	aRequest->UnmapPages(aMemory->IsExecutable());
+	aRequest->UnmapPages(aAnyExecutable);
 
 	return r;
 	}
 
 
-TInt DDataPagedMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry)
+void DDataPagedMemoryManager::CleanPages(TUint aPageCount, SPageInfo** aPageInfos, TBool aBackground)
 	{
-	if(!aPageInfo->IsDirty())
-		return KErrNone;
-
-	// shouldn't be asked to clean a page which is writable...
-	__NK_ASSERT_DEBUG(!aPageInfo->IsWritable());
-
-	// mark page as being modified by us...
-	TUint modifierInstance; // dummy variable used only for it's storage address on the stack
-	aPageInfo->SetModifier(&modifierInstance);
-
-	// get info about page...
-	TUint index = aPageInfo->Index();
-	TPhysAddr physAddr = aPageInfo->PhysAddr();
-
-	// Release the mmu lock while we write out the page.  This is safe as the 
-	// RamAllocLock stops the physical address being freed from this object.
+	__NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aPageCount <= (TUint)KMaxPagesToClean);
+	
+	TUint i;
+	DMemoryObject* memory[KMaxPagesToClean];
+	TUint index[KMaxPagesToClean];
+	TPhysAddr physAddr[KMaxPagesToClean];
+	TBool anyExecutable = EFalse;
+	
+	for (i = 0 ; i < aPageCount ; ++i)
+		{
+		SPageInfo* pi = aPageInfos[i];
+
+		__NK_ASSERT_DEBUG(!pi->IsWritable());
+		__NK_ASSERT_DEBUG(pi->IsDirty());
+		
+		// mark page as being modified by us...
+		pi->SetModifier(&memory[0]);
+		
+		// get info about page...
+		memory[i] = pi->Owner();
+		index[i] = pi->Index();
+		physAddr[i] = pi->PhysAddr();
+		anyExecutable = anyExecutable || memory[i]->IsExecutable();
+		}
+
 	MmuLock::Unlock();
 
 	// get paging request object...
 	DPageWriteRequest* req;
-	TInt r = AcquirePageWriteRequest(req, aMemory, index, 1);
-	__NK_ASSERT_DEBUG(r==KErrNone); // we should always get a write request because the previous function blocks until it gets one
-	__NK_ASSERT_DEBUG(req); // we should always get a write request because the previous function blocks until it gets one
-
-	r = WritePages(aMemory, index, 1, &physAddr, req);
+	TInt r = AcquirePageWriteRequest(req, memory, index, aPageCount);
+	__NK_ASSERT_DEBUG(r==KErrNone && req);
+	
+	r = WritePages(memory, index, physAddr, aPageCount, req, anyExecutable, aBackground);
+	__NK_ASSERT_DEBUG(r == KErrNone);  // this should never return an error
 
 	req->Release();
 
 	MmuLock::Lock();
 
-	if(r!=KErrNone)
-		return r;
-
-	// check if page is clean...
-	if(aPageInfo->CheckModified(&modifierInstance) || aPageInfo->IsWritable())
-		{
-		// someone else modified the page, or it became writable, so fail...
-		r = KErrInUse;
-		}
-	else
-		{
-		// page is now clean!
-		ThePager.SetClean(*aPageInfo);
-		}
-
-	return r;
+	for (i = 0 ; i < aPageCount ; ++i)
+		{
+		SPageInfo* pi = aPageInfos[i];
+		// check if page is clean...
+		if(pi->CheckModified(&memory[0]) || pi->IsWritable())
+			{
+			// someone else modified the page, or it became writable, so mark as not cleaned
+			aPageInfos[i] = NULL;
+			}
+		else
+			{
+			// page is now clean!
+			ThePager.SetClean(*pi);
+			}
+		}
 	}
 
 
 TBool DDataPagedMemoryManager::IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
 	{// MmuLock required to protect manager data.
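
CleanPages relies on an optimistic "modifier" token rather than holding MmuLock across the write: each page is stamped with a token before the lock is dropped, and a page whose token changed in the meantime is reported back as not cleaned (its slot is set to NULL). The idiom reduced to its core (a sketch, not the kernel's SPageInfo):

	// Optimistic concurrency via a modifier token: the cleaner stamps each
	// page, drops the lock, does the slow write, then retakes the lock and
	// checks whether anyone re-stamped the page in between.
	struct PageInfo
		{
		const void* iModifier;
		PageInfo() : iModifier(0) {}
		void SetModifier(const void* aToken) { iModifier = aToken; }
		bool CheckModified(const void* aToken) const { return iModifier != aToken; }
		};

Any other thread that touches the page meanwhile calls SetModifier with its own stack address, so the cleaner's final CheckModified(&memory[0]) detects the interleaving without any per-page lock being held during the media write.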

@@ -678,26 +844,17 @@
 	}
 
 
 void DDataPagedMemoryManager::GetSwapInfo(SVMSwapInfo& aInfoOut)
 	{
-	NKern::ThreadEnterCS();
-	RamAllocLock::Lock();
 	iSwapManager->GetSwapInfo(aInfoOut);
-	RamAllocLock::Unlock();
-	NKern::ThreadLeaveCS();
 	}
 
 
 TInt DDataPagedMemoryManager::SetSwapThresholds(const SVMSwapThresholds& aThresholds)
 	{
-	NKern::ThreadEnterCS();
-	RamAllocLock::Lock();
-	TInt r = iSwapManager->SetSwapThresholds(aThresholds);
-	RamAllocLock::Unlock();
-	NKern::ThreadLeaveCS();
-	return r;
+	return iSwapManager->SetSwapThresholds(aThresholds);
 	}
 
 
 void GetSwapInfo(SVMSwapInfo& aInfoOut)
 	{