author | Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com> |
Tue, 27 Apr 2010 18:02:57 +0300 | |
branch | RCL_3 |
changeset 97 | 41f0cfe18c80 |
parent 87 | 2f92ad2dc5db |
child 110 | c734af59ce98 |
permissions | -rw-r--r-- |
0 | 1 |
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). |
2 |
// All rights reserved. |
|
3 |
// This component and the accompanying materials are made available |
|
4 |
// under the terms of the License "Eclipse Public License v1.0" |
|
5 |
// which accompanies this distribution, and is available |
|
6 |
// at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 |
// |
|
8 |
// Initial Contributors: |
|
9 |
// Nokia Corporation - initial contribution. |
|
10 |
// |
|
11 |
// Contributors: |
|
12 |
// |
|
13 |
// Description: |
|
14 |
// |
|
15 |
||
16 |
#include <plat_priv.h> |
|
17 |
#include "mm.h" |
|
18 |
#include "mmu.h" |
|
19 |
||
20 |
#include "mmanager.h" |
|
21 |
#include "mobject.h" |
|
22 |
#include "mmapping.h" |
|
23 |
#include "mpager.h" |
|
24 |
#include "mswap.h" |
|
25 |
||
26 |
||
27 |
/** |
|
28 |
Manages the swap via the data paging device. |
|
29 |
*/ |
|
30 |
class DSwapManager |
|
31 |
{ |
|
32 |
public: |
|
33 |
||
34 |
enum TSwapFlags |
|
35 |
{ |
|
36 |
EAllocated = 1 << 0, |
|
37 |
EUninitialised = 1 << 1, |
|
38 |
ESaved = 1 << 2, |
|
39 |
ESwapFlagsMask = 0x7, |
|
40 |
||
41 |
ESwapIndexShift = 3, |
|
42 |
ESwapIndexMask = 0xffffffff << ESwapIndexShift, |
|
43 |
}; |
|
44 |
||
45 |
TInt Create(DPagingDevice* aDevice); |
|
46 |
||
47 |
TInt ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount); |
|
48 |
TInt UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount); |
|
49 |
TBool IsReserved(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount); |
|
50 |
||
51 |
TInt ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageReadRequest* aRequest, TPhysAddr* aPhysAddrs); |
|
52 |
TInt WriteSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageWriteRequest* aRequest); |
|
53 |
void DoDeleteNotify(TUint aSwapData); |
|
54 |
||
55 |
void GetSwapInfo(SVMSwapInfo& aInfoOut); |
|
56 |
TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds); |
|
57 |
void CheckSwapThresholds(TUint aInitial, TUint aFinal); |
|
58 |
||
59 |
protected: |
|
60 |
DPagingDevice* iDevice; |
|
61 |
TBitMapAllocator* iBitMap; |
|
62 |
TUint iBitMapFree; |
|
63 |
TUint iAllocOffset; |
|
64 |
TUint iSwapThesholdLow; |
|
65 |
TUint iSwapThesholdGood; |
|
66 |
TThreadMessage iDelNotifyMsg; |
|
67 |
}; |
|
68 |
||
69 |
||
70 |
/**
Manager for demand paged memory objects which contain writeable data.
The contents of the memory are written to a backing store whenever its
pages are 'paged out'.

@see DSwapManager
*/
class DDataPagedMemoryManager : public DPagedMemoryManager
	{
private:
	// from DMemoryManager...
	virtual TInt Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual void Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt Wipe(DMemoryObject* aMemory);
	virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry);

	// Methods inherited from DPagedMemoryManager
	virtual void Init3();
	virtual TInt InstallPagingDevice(DPagingDevice* aDevice);
	virtual TInt AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest);
	virtual TInt WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest);
	virtual TBool IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount);

public:
	// Report total and free swap; forwards to the swap manager under the right locks.
	void GetSwapInfo(SVMSwapInfo& aInfoOut);
	// Set low/good free-swap notification thresholds; forwards to the swap manager.
	TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);

private:
	/**
	The paging device used for accessing the backing store.
	This is set by #InstallPagingDevice.
	*/
	DPagingDevice* iDevice;

	/**
	The instance of #DSwapManager being used by this manager.
	*/
	DSwapManager* iSwapManager;

public:
	/**
	The single instance of this manager class.
	*/
	static DDataPagedMemoryManager TheManager;
	};
|
117 |
||
118 |
||
119 |
// The singleton data-paged memory manager, exported to the rest of the
// memory model through the TheDataPagedMemoryManager pointer.
DDataPagedMemoryManager DDataPagedMemoryManager::TheManager;
DPagedMemoryManager* TheDataPagedMemoryManager = &DDataPagedMemoryManager::TheManager;
|
121 |
||
122 |
||
123 |
/**
Create a swap manager.

@param aDevice The demand paging device for access to the swap.

@return KErrNone on success, KErrTooBig if the swap area is larger than
        4x physical RAM, KErrNoMemory if the tracking bitmap cannot be
        allocated.
*/
TInt DSwapManager::Create(DPagingDevice* aDevice)
	{
	// The flag bits and the swap index bits must not overlap.
	__ASSERT_COMPILE(!(ESwapIndexMask & ESwapFlagsMask));
	// May only be created once, with no device previously installed.
	__NK_ASSERT_DEBUG(iDevice == NULL);
	iDevice = aDevice;

	// Create the structures required to track the swap usage.
	// iSwapSize is in read units; convert to a count of pages.
	TUint swapPages = (iDevice->iSwapSize << iDevice->iReadUnitShift) >> KPageShift;
	// Can't have more swap pages than we can map.
	__NK_ASSERT_DEBUG(swapPages<=DMemoryObject::KMaxPagingManagerData);
	// The swap index must fit in the bits above ESwapIndexShift.
	__NK_ASSERT_DEBUG(swapPages<=(KMaxTUint>>ESwapIndexShift));

	if ((TheMmu.TotalPhysicalRamPages() << 2) < swapPages)
		{// The swap is limited to a maximum of 4 times the amount of RAM.
		return KErrTooBig;
		}

	// One bit per swap page; all bits initially free.
	iBitMap = TBitMapAllocator::New(swapPages, ETrue);
	if (iBitMap == NULL)
		{// Not enough RAM to keep track of the swap.
		return KErrNoMemory;
		}
	iBitMapFree = swapPages;
	iAllocOffset = 0;
	return KErrNone;
	}
|
154 |
||
155 |
||
156 |
/**
Reserve some swap pages for the requested region of the memory object

Note that this only accounts for the space (iBitMapFree) and marks each
page EAllocated|EUninitialised; actual bitmap positions are assigned
lazily when the pages are first written out (see WriteSwapPages).

@param aMemory		The memory object to reserve pages for.
@param aStartIndex	The page index in the memory object of the start of the region.
@param aPageCount	The number of pages to reserve.

@return KErrNone on success, KErrNoMemory if not enough swap space available.
@pre aMemory's lock is held.
@post aMemory's lock is held.
*/
TInt DSwapManager::ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	const TUint indexEnd = aStartIndex + aPageCount;
	TUint index = aStartIndex;

#ifdef _DEBUG
	for (; index < indexEnd; index++)
		{// This page shouldn't already be in use.
		MmuLock::Lock();
		__NK_ASSERT_DEBUG(!(aMemory->PagingManagerData(index) & ESwapFlagsMask));
		MmuLock::Unlock();
		}
#endif

	if (iBitMapFree < aPageCount)
		{
		// Not enough free swap; tell interested parties we are out of memory.
		Kern::AsyncNotifyChanges(EChangesOutOfMemory);
		return KErrNoMemory;
		}
	// Reserve the required swap space and mark each page as allocated and uninitialised.
	TUint initFree = iBitMapFree;
	iBitMapFree -= aPageCount;
	for (index = aStartIndex; index < indexEnd; index++)
		{
		// Grab MmuLock to stop manager data being accessed.
		MmuLock::Lock();
		TUint swapData = aMemory->PagingManagerData(index);
		__NK_ASSERT_DEBUG(!(swapData & EAllocated));
		swapData = EAllocated | EUninitialised;
		aMemory->SetPagingManagerData(index, swapData);
		MmuLock::Unlock();
		}

	// May fire free/low-memory notifications if a threshold was crossed.
	CheckSwapThresholds(initFree, iBitMapFree);
	return KErrNone;
	}
|
206 |
||
207 |
||
208 |
/**
Unreserve swap pages for the requested region of the memory object.

Frees any bitmap positions used by saved pages, clears the per-page
manager data, and issues DeleteNotify to the media for each freed
saved page.

@param aMemory		The memory object to unreserve pages for.
@param aStartIndex	The page index in the memory object of the start of the region.
@param aPageCount	The number of pages to unreserve.

@return The number of pages freed.
@pre aMemory's lock is held.
@post aMemory's lock is held.
*/
TInt DSwapManager::UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	TUint initFree = iBitMapFree;
	TUint freedPages = 0;
	const TUint indexEnd = aStartIndex + aPageCount;
	for (TUint index = aStartIndex; index < indexEnd; index++)
		{
		// Grab MmuLock to stop manager data being accessed.
		MmuLock::Lock();
		TUint swapData = aMemory->PagingManagerData(index);
		TUint swapIndex = swapData >> ESwapIndexShift;
		TBool notifyDelete = EFalse;
		if (swapData & EAllocated)
			{
			if (swapData & ESaved)
				{
				// A copy exists in swap: release its bitmap slot and remember
				// to tell the media driver once MmuLock is dropped.
				notifyDelete = ETrue;
				iBitMap->Free(swapIndex);
				}
			freedPages++;
			aMemory->SetPagingManagerData(index, 0);
			}
#ifdef _DEBUG
		else
			__NK_ASSERT_DEBUG(swapData == 0);
#endif

		MmuLock::Unlock();

		// DeleteNotify may block on the device, so it is done outside MmuLock.
		if (notifyDelete)
			DoDeleteNotify(swapIndex);
		}
	iBitMapFree += freedPages;
	// May fire a free-memory notification if the good threshold was crossed.
	CheckSwapThresholds(initFree, iBitMapFree);
	return freedPages;
	}
|
258 |
||
259 |
||
260 |
/** |
|
261 |
Determine whether the specified pages in the memory object have swap reserved for them. |
|
262 |
||
263 |
@param aMemory The memory object that owns the pages. |
|
264 |
@param aStartIndex The first index of the pages to check. |
|
265 |
@param aPageCount The number of pages to check. |
|
266 |
||
267 |
@return ETrue if swap is reserved for all the pages, EFalse otherwise. |
|
268 |
*/ |
|
269 |
TBool DSwapManager::IsReserved(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount) |
|
270 |
{// MmuLock required to protect manager data. |
|
271 |
__NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
272 |
__NK_ASSERT_DEBUG(aStartIndex < aMemory->iSizeInPages); |
|
273 |
__NK_ASSERT_DEBUG(aStartIndex + aPageCount <= aMemory->iSizeInPages); |
|
274 |
||
275 |
const TUint indexEnd = aStartIndex + aPageCount; |
|
276 |
for (TUint index = aStartIndex; index < indexEnd; index++) |
|
277 |
{ |
|
278 |
if (!(aMemory->PagingManagerData(index) & DSwapManager::EAllocated)) |
|
279 |
{// This page is not allocated by swap manager. |
|
280 |
return EFalse; |
|
281 |
} |
|
282 |
} |
|
283 |
return ETrue; |
|
284 |
} |
|
285 |
||
286 |
||
287 |
/**
Read from the swap the specified pages associated with the memory object.

Uninitialised pages (never written to swap) are not read; they are wiped
in place instead, using the memory object's wipe byte.

@param aMemory	The memory object to read the pages for
@param aIndex	The index of the first page within the memory object.
@param aCount	The number of pages to read.
@param aLinAddr	The address to copy the pages to.
@param aRequest	The request to use for the read.
@param aPhysAddrs	An array of the physical addresses for each page to read in.
				NOTE(review): only advanced, never dereferenced here - confirm
				whether it is still required.

@return KErrNone on success, KErrNotFound if a page is not committed.
*/
TInt DSwapManager::ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageReadRequest* aRequest, TPhysAddr* aPhysAddrs)
	{
	TInt r = KErrNone;
	const TUint readUnitShift = iDevice->iReadUnitShift;
	TUint readSize = KPageSize >> readUnitShift;	// One page, in read units.
	TThreadMessage* msg = const_cast<TThreadMessage*>(&aRequest->iMessage);

	// Determine the wipe byte values for uninitialised pages.
	TUint allocFlags = aMemory->RamAllocFlags();
	TBool wipePages = !(allocFlags & Mmu::EAllocNoWipe);
	TUint8 wipeByte = (allocFlags & Mmu::EAllocUseCustomWipeByte) ? (allocFlags >> Mmu::EAllocWipeByteShift) & 0xff : 0x03;

	const TUint indexEnd = aIndex + aCount;
	for (TUint index = aIndex; index < indexEnd; index++, aLinAddr += KPageSize, aPhysAddrs++)
		{
		START_PAGING_BENCHMARK;

		MmuLock::Lock();	// MmuLock required for atomic access to manager data.
		TUint swapData = aMemory->PagingManagerData(index);

		if (!(swapData & EAllocated))
			{// This page is not committed to the memory object
			MmuLock::Unlock();
			return KErrNotFound;
			}
		if (swapData & EUninitialised)
			{// This page has not been written to yet so don't read from swap
			// just wipe it if required.
			MmuLock::Unlock();
			if (wipePages)
				{
				memset((TAny*)aLinAddr, wipeByte, KPageSize);
				}
			}
		else
			{
			// Page was previously written out, so it must be marked saved.
			__NK_ASSERT_DEBUG(swapData & ESaved);
			TUint swapIndex = swapData >> ESwapIndexShift;
			// OK to release as if the object's data is decommitted the pager
			// will check that data is still valid before mapping it.
			MmuLock::Unlock();
			// Convert the swap page index to a media offset in read units.
			TUint readStart = (swapIndex << KPageShift) >> readUnitShift;
			START_PAGING_BENCHMARK;
			r = iDevice->Read(msg, aLinAddr, readStart, readSize, DPagingDevice::EDriveDataPaging);
			if (r != KErrNone)
				__KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::ReadSwapPages: error reading media at %08x + %x: %d", readStart << readUnitShift, readSize << readUnitShift, r));
			__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
			END_PAGING_BENCHMARK(EPagingBmReadDataMedia);
			// TODO: Work out what to do if page in fails, unmap all pages????
			__NK_ASSERT_ALWAYS(r == KErrNone);
			}
		END_PAGING_BENCHMARK(EPagingBmReadDataPage);
		}

	return r;
	}
|
353 |
||
354 |
||
355 |
/**
Write the specified memory object's pages from the RAM into the swap.

Each page is written to a freshly allocated swap location (allocated
round-robin from iAllocOffset); any previously saved copy is freed
first so stale data is never left claiming swap space.

@param	aMemory	The memory object who owns the pages.
@param	aIndex	The index within the memory object.
@param 	aCount	The number of pages to write out.
@param	aLinAddr	The location of the pages to write out.
@param	aRequest	The demand paging request to use.

@return KErrNone on success (write failures currently fault the kernel).
*/
TInt DSwapManager::WriteSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageWriteRequest* aRequest)
	{// The RamAllocLock prevents the object's swap pages being reassigned.
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	// Write the page out to the swap.
	TInt r = KErrNone;
	const TUint readUnitShift = iDevice->iReadUnitShift;
	TUint writeSize = KPageSize >> readUnitShift;	// One page, in read units.
	TThreadMessage* msg = const_cast<TThreadMessage*>(&aRequest->iMessage);

	const TUint indexEnd = aIndex + aCount;
	for (TUint index = aIndex; index < indexEnd; index++)
		{
		START_PAGING_BENCHMARK;

		MmuLock::Lock();
		TUint swapData = aMemory->PagingManagerData(index);
		// OK to release as ram alloc lock prevents manager data being updated.
		MmuLock::Unlock();
		if (!(swapData & EAllocated))
			{// This page is being decommited from aMemory so it is clean/unrequired.
			continue;
			}
		TInt swapIndex = swapData >> ESwapIndexShift;
		if (swapData & ESaved)
			{// An old version of this page has been saved to swap so free it now
			// as it will be out of date.
			iBitMap->Free(swapIndex);
			DoDeleteNotify(swapIndex);
			}
		// Get a new swap location for this page.
		swapIndex = iBitMap->AllocFrom(iAllocOffset);
		// Cannot fail: reservation guaranteed the space when the page was committed.
		__NK_ASSERT_DEBUG(swapIndex != -1 && swapIndex < iBitMap->iSize);
		iAllocOffset = swapIndex + 1;
		if (iAllocOffset == (TUint)iBitMap->iSize)
			iAllocOffset = 0;

		// Convert the swap page index to a media offset in read units.
		TUint writeOffset = (swapIndex << KPageShift) >> readUnitShift;
		{
		START_PAGING_BENCHMARK;
		r = iDevice->Write(msg, aLinAddr, writeOffset, writeSize, EFalse);
		if (r != KErrNone)
			__KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::WriteSwapPages: error writing media at %08x + %x: %d", writeOffset << readUnitShift, writeSize << readUnitShift, r));
		__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
		END_PAGING_BENCHMARK(EPagingBmWriteDataMedia);
		}
		// TODO: Work out what to do if page out fails.
		__NK_ASSERT_ALWAYS(r == KErrNone);
		MmuLock::Lock();
		// The swap data should not have been modified.
		__NK_ASSERT_DEBUG(swapData == aMemory->PagingManagerData(index));
		// Store the new swap location and mark the page as saved.
		swapData &= ~(EUninitialised | ESwapIndexMask);
		swapData |= (swapIndex << ESwapIndexShift) | ESaved;
		aMemory->SetPagingManagerData(index, swapData);
		MmuLock::Unlock();

		END_PAGING_BENCHMARK(EPagingBmWriteDataPage);
		}

	return r;
	}
|
427 |
||
428 |
||
429 |
/**
Notify the media driver that the page written to swap is no longer required.

A no-op unless __PAGING_DELETE_NOTIFY_ENABLED is defined at build time.

@param aSwapIndex The swap page index whose contents can be discarded.
*/
void DSwapManager::DoDeleteNotify(TUint aSwapIndex)
	{
	// Ram Alloc lock prevents the swap location being assigned to another page.
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

#ifdef __PAGING_DELETE_NOTIFY_ENABLED
	// Convert the swap page index to a media offset and size in read units.
	const TUint readUnitShift = iDevice->iReadUnitShift;
	const TUint size = KPageSize >> readUnitShift;
	TUint offset = (aSwapIndex << KPageShift) >> readUnitShift;

	START_PAGING_BENCHMARK;
	// Ignore the return value as this is just an optimisation that is not supported on all media.
	(void)iDevice->DeleteNotify(&iDelNotifyMsg, offset, size);
	END_PAGING_BENCHMARK(EPagingBmDeleteNotifyDataPage);
#endif
	}
|
448 |
||
449 |
||
450 |
// Check swap thresholds and notify (see K::CheckFreeMemoryLevel) |
|
451 |
void DSwapManager::CheckSwapThresholds(TUint aInitial, TUint aFinal) |
|
452 |
{ |
|
453 |
TUint changes = 0; |
|
454 |
if (aFinal < iSwapThesholdLow && aInitial >= iSwapThesholdLow) |
|
455 |
changes |= (EChangesFreeMemory | EChangesLowMemory); |
|
456 |
if (aFinal >= iSwapThesholdGood && aInitial < iSwapThesholdGood) |
|
457 |
changes |= EChangesFreeMemory; |
|
458 |
if (changes) |
|
459 |
Kern::AsyncNotifyChanges(changes); |
|
460 |
} |
|
461 |
||
462 |
||
463 |
// Report the total and free swap sizes, converted from pages to bytes.
void DSwapManager::GetSwapInfo(SVMSwapInfo& aInfoOut)
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	const TUint totalBytes = iBitMap->iSize << KPageShift;
	const TUint freeBytes = iBitMapFree << KPageShift;
	aInfoOut.iSwapSize = totalBytes;
	aInfoOut.iSwapFree = freeBytes;
	}
|
469 |
||
470 |
||
471 |
// Set the low/good free-swap notification thresholds.
// Thresholds arrive in bytes and are rounded up to whole pages.
// Returns KErrArgument for an inverted pair or a good threshold larger
// than the total swap.
TInt DSwapManager::SetSwapThresholds(const SVMSwapThresholds& aThresholds)
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	// Reject an inverted pair up front.
	if (aThresholds.iLowThreshold > aThresholds.iGoodThreshold)
		return KErrArgument;

	// Round each byte count up to whole pages.
	const TInt lowPages = (aThresholds.iLowThreshold + KPageSize - 1) >> KPageShift;
	const TInt goodPages = (aThresholds.iGoodThreshold + KPageSize - 1) >> KPageShift;

	// The good threshold cannot exceed the total amount of swap.
	if (goodPages > iBitMap->iSize)
		return KErrArgument;

	iSwapThesholdLow = lowPages;
	iSwapThesholdGood = goodPages;
	return KErrNone;
	}
|
484 |
||
485 |
||
486 |
||
487 |
/**
Install the data paging device and create the swap manager for it.

Only one data paging device may ever be installed; the slot is claimed
atomically so a racing second install fails with KErrAlreadyExists.

@param aDevice The paging device to install.
@return KErrNone on success (or when data paging is disabled by policy),
        KErrAlreadyExists if a device is already installed, or an error
        from DSwapManager::Create().
*/
TInt DDataPagedMemoryManager::InstallPagingDevice(DPagingDevice* aDevice)
	{
	TRACEB(("DDataPagedMemoryManager::InstallPagingDevice(0x%08x)",aDevice));

	TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
	TRACEB(("Data Paging Policy = %d", dataPolicy >> EKernelConfigDataPagingPolicyShift));
	if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
		{// No paging allowed so don't register the device.
		return KErrNone;
		}

	// Store the device, blocking any other devices from installing.
	if (!NKern::CompareAndSwap((TAny*&)iDevice, (TAny*)NULL, (TAny*)aDevice))
		{// Data paging device already installed.
		__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("**** Attempt to install more than one data paging device !!!!!!!! ****"));
		return KErrAlreadyExists;
		}

	// Now we can determine the size of the swap, create the swap manager.
	iSwapManager = new DSwapManager;
	__NK_ASSERT_ALWAYS(iSwapManager);

	TInt r = iSwapManager->Create(iDevice);
	if (r != KErrNone)
		{// Couldn't create the swap manager.
		// Roll back: destroy the manager and release the device slot so a
		// later install attempt can succeed.
		delete iSwapManager;
		iSwapManager = NULL;
		NKern::SafeSwap(NULL, (TAny*&)iDevice);
		return r;
		}
	// Advertise that data paging is now active.
	NKern::LockedSetClear(K::MemModelAttributes, 0, EMemModelAttrDataPaging);

	return r;
	}
|
521 |
||
522 |
||
523 |
// Acquire a page-read request object from the paging device's request pool.
// NOTE(review): presumably blocks until a request is available - the callers
// assert aRequest is non-NULL; confirm against the request pool implementation.
TInt DDataPagedMemoryManager::AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	aRequest = iDevice->iRequestPool->AcquirePageReadRequest(aMemory,aIndex,aCount);
	return KErrNone;
	}
|
528 |
||
529 |
||
530 |
// Acquire a page-write request object from the paging device's request pool.
// NOTE(review): presumably blocks until a request is available - CleanPage()
// asserts the returned request is non-NULL; confirm against the pool implementation.
TInt DDataPagedMemoryManager::AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	aRequest = iDevice->iRequestPool->AcquirePageWriteRequest(aMemory,aIndex,aCount);
	return KErrNone;
	}
|
535 |
||
536 |
||
537 |
// Phase-3 initialisation; nothing is required for this manager.
void DDataPagedMemoryManager::Init3()
	{
	}
|
540 |
||
541 |
||
542 |
// Commit backing store for a region of a data-paged memory object.
// Returns KErrNone, or KErrNoMemory when insufficient swap is available.
TInt DDataPagedMemoryManager::Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// Pinned pages may still be owned even though they were decommitted;
	// re-initialise any such pages first.
	ReAllocDecommitted(aMemory,aIndex,aCount);

	// Reserve swap space for the whole region under the RamAllocLock,
	// which guards the swap manager's bookkeeping.
	RamAllocLock::Lock();
	const TInt result = iSwapManager->ReserveSwap(aMemory, aIndex, aCount);
	RamAllocLock::Unlock();

	return result;
	}
|
556 |
||
557 |
||
558 |
// Decommit a region of a data-paged memory object, returning its swap
// space before freeing the page array entries.
void DDataPagedMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE2(("DDataPagedMemoryManager::Free(0x%08x,0x%x,0x%x)", aMemory, aIndex, aCount));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// Unreserve the swap pages associated with the memory object.  Do this before
	// removing the page array entries to prevent a page fault reallocating these pages.
	RamAllocLock::Lock();
	TInt freed = iSwapManager->UnreserveSwap(aMemory, aIndex, aCount);
	(void)freed;	// Count is unused here; cast silences unused-variable warnings.
	RamAllocLock::Unlock();

	DoFree(aMemory,aIndex,aCount);
	}
|
572 |
||
573 |
||
574 |
/**
@copydoc DMemoryManager::Wipe
@todo	Not yet implemented.
		Need to handle this smartly, e.g. throw RAM away and set to uninitialised
*/
TInt DDataPagedMemoryManager::Wipe(DMemoryObject* aMemory)
	{
	// Deliberate stub: faults on debug builds so any caller is noticed.
	__NK_ASSERT_ALWAYS(0); // not implemented yet

	return KErrNotSupported;
	}
|
585 |
||
586 |
||
587 |
// Page in: fill the given physical pages with their contents from swap
// (or wipe them if they were never written).
TInt DDataPagedMemoryManager::ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest)
	{
	__NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));

	// Temporarily map the physical pages so their contents can be filled in.
	const TLinAddr mapAddr = aRequest->MapPages(aIndex, aCount, aPages);

	const TInt result = iSwapManager->ReadSwapPages(aMemory, aIndex, aCount, mapAddr, aRequest, aPages);

	// If the memory object allows executable mappings an IMB is needed on unmap.
	aRequest->UnmapPages(aMemory->IsExecutable());

	return result;
	}
|
601 |
||
602 |
||
603 |
// Page out: write the given physical pages' contents to swap.
TInt DDataPagedMemoryManager::WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest)
	{
	__NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));

	// Temporarily map the physical pages so their contents can be accessed.
	const TLinAddr mapAddr = aRequest->MapPages(aIndex, aCount, aPages);

	const TInt result = iSwapManager->WriteSwapPages(aMemory, aIndex, aCount, mapAddr, aRequest);

	// If the memory object allows executable mappings an IMB is needed on unmap.
	aRequest->UnmapPages(aMemory->IsExecutable());

	return result;
	}
|
617 |
||
618 |
||
619 |
/**
Write a dirty page out to swap and, if nothing modified it meanwhile,
mark it clean.

@param aMemory			The memory object owning the page.
@param aPageInfo		The page to clean.
@param aPageArrayEntry	Reference to the page's array entry (unused here).

@return KErrNone if the page was written (and possibly marked clean),
        KErrInUse if the page was modified or became writable during the
        write, or an error from WritePages().

@pre MmuLock held; RamAllocLock held (keeps the physical page alive while
     MmuLock is dropped around the media write).
@post MmuLock held.
*/
TInt DDataPagedMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry)
	{
	if(!aPageInfo->IsDirty())
		return KErrNone;

	// shouldn't be asked to clean a page which is writable...
	__NK_ASSERT_DEBUG(!aPageInfo->IsWritable());

	// mark page as being modified by us...
	TUint modifierInstance; // dummy variable used only for it's storage address on the stack
	aPageInfo->SetModifier(&modifierInstance);

	// get info about page...
	TUint index = aPageInfo->Index();
	TPhysAddr physAddr = aPageInfo->PhysAddr();

	// Release the mmu lock while we write out the page.  This is safe as the
	// RamAllocLock stops the physical address being freed from this object.
	MmuLock::Unlock();

	// get paging request object...
	DPageWriteRequest* req;
	TInt r = AcquirePageWriteRequest(req, aMemory, index, 1);
	__NK_ASSERT_DEBUG(r==KErrNone); // we should always get a write request because the previous function blocks until it gets one
	__NK_ASSERT_DEBUG(req); // we should always get a write request because the previous function blocks until it gets one

	r = WritePages(aMemory, index, 1, &physAddr, req);

	req->Release();

	MmuLock::Lock();

	if(r!=KErrNone)
		return r;

	// check if page is clean...
	if(aPageInfo->CheckModified(&modifierInstance) || aPageInfo->IsWritable())
		{
		// someone else modified the page, or it became writable, so fail...
		r = KErrInUse;
		}
	else
		{
		// page is now clean!
		ThePager.SetClean(*aPageInfo);
		}

	return r;
	}
|
668 |
||
669 |
||
670 |
// Report whether backing store is committed for the given region.
TBool DDataPagedMemoryManager::IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	// Caller must hold MmuLock, which protects the per-page manager data.
	// DPagedMemoryManager::DoPageInDone() won't allow MmuLock to be released,
	// so only up to KMaxPagesInOneGo pages can be handled per call.
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aCount <= KMaxPagesInOneGo);

	// "Allocated" for a data-paged object means swap is reserved for every page.
	return iSwapManager->IsReserved(aMemory, aIndex, aCount);
	}
|
679 |
||
680 |
||
681 |
// Fetch swap totals from the swap manager, taking the locks it requires.
void DDataPagedMemoryManager::GetSwapInfo(SVMSwapInfo& aInfoOut)
	{
	NKern::ThreadEnterCS();
	RamAllocLock::Lock();
	iSwapManager->GetSwapInfo(aInfoOut);
	RamAllocLock::Unlock();
	NKern::ThreadLeaveCS();
	}
|
689 |
||
690 |
||
691 |
// Forward new swap thresholds to the swap manager, taking the locks it requires.
TInt DDataPagedMemoryManager::SetSwapThresholds(const SVMSwapThresholds& aThresholds)
	{
	NKern::ThreadEnterCS();
	RamAllocLock::Lock();
	TInt r = iSwapManager->SetSwapThresholds(aThresholds);
	RamAllocLock::Unlock();
	NKern::ThreadLeaveCS();
	return r;
	}
|
700 |
||
701 |
||
702 |
// Free function wrapper used by the rest of the kernel to query swap totals.
void GetSwapInfo(SVMSwapInfo& aInfoOut)
	{
	((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->GetSwapInfo(aInfoOut);
	}
|
706 |
||
707 |
||
708 |
// Free function wrapper used by the rest of the kernel to set swap thresholds.
TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds)
	{
	return ((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->SetSwapThresholds(aThresholds);
	}
|
712 |