author | Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com> |
Tue, 27 Apr 2010 18:02:57 +0300 | |
branch | RCL_3 |
changeset 24 | 41f0cfe18c80 |
parent 22 | 2f92ad2dc5db |
child 26 | c734af59ce98 |
permissions | -rw-r--r-- |
0 | 1 |
// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies). |
2 |
// All rights reserved. |
|
3 |
// This component and the accompanying materials are made available |
|
4 |
// under the terms of the License "Eclipse Public License v1.0" |
|
5 |
// which accompanies this distribution, and is available |
|
6 |
// at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 |
// |
|
8 |
// Initial Contributors: |
|
9 |
// Nokia Corporation - initial contribution. |
|
10 |
// |
|
11 |
// Contributors: |
|
12 |
// |
|
13 |
// Description: |
|
14 |
// |
|
15 |
||
16 |
/** |
|
17 |
@file |
|
18 |
@internalComponent |
|
19 |
*/ |
|
20 |
||
21 |
#ifndef MPAGER_H
#define MPAGER_H

// Forward declarations for types used by the pager interface below.
struct SVMCacheInfo;
class DMemModelThread;
class DMemoryMappingBase;

/**
The demand-paging "pager".

Manages the live list of RAM pages that cache demand-paged content,
services page faults, and provides page pinning, donation and reclaim
services to the rest of the memory model.
*/
class DPager
	{
public:
	DPager();

	/** Initialise the paging cache (live list parameters). */
	void InitCache();

	/** Third-phase kernel initialisation for the pager. */
	void Init3();

	/**
	True once the paging cache has been initialised.
	Both values tested here are zero until InitCache has run.
	*/
	FORCE_INLINE TBool CacheInitialised()
		{
		return iYoungOldRatio && iMinimumPageLimit;
		}

	/** Number of unused (free) pages currently held on the live list. */
	FORCE_INLINE TUint NumberOfFreePages()
		{
		return iNumberOfFreePages;
		}

	/**
	Number of dirty pages in the paging cache.
	Reads iNumberOfDirtyPages under the MmuLock for a consistent snapshot.
	*/
	FORCE_INLINE TUint NumberOfDirtyPages()
		{
		TUint ret;
		MmuLock::Lock();
		ret = iNumberOfDirtyPages;
		MmuLock::Unlock();
		return ret;
		}

	/** Minimum size for the live page list, including locked pages. */
	FORCE_INLINE TUint MinimumPageCount()
		{
		return iMinimumPageCount;
		}

	/**
	Mark a page as writable, updating the dirty page count if this is the
	first mapping to write to the page.
	*/
	FORCE_INLINE void SetWritable(SPageInfo& aPageInfo)
		{
		if (!aPageInfo.IsDirty())
			{// This is the first mapping to write to the page so increase the
			// dirty page count.
			aPageInfo.SetWritable();
			iNumberOfDirtyPages++;
			}
		}

	/** Mark a dirty page as clean again and decrease the dirty page count. */
	FORCE_INLINE void SetClean(SPageInfo& aPageInfo)
		{
		__NK_ASSERT_DEBUG(iNumberOfDirtyPages);
		__NK_ASSERT_DEBUG(aPageInfo.IsDirty());
		aPageInfo.SetClean();
		iNumberOfDirtyPages--;
		}

	/**
	Remove RAM pages from the cache and return them to the system's free pool.
	(Free them.)

	This is called by class Mmu when it requires more free RAM to meet an
	allocation request.

	@param aNumPages The number of pages to free up.
	@return True if all pages could be freed, false otherwise
	@pre RamAlloc mutex held.
	*/
	TBool GetFreePages(TInt aNumPages);


	/**
	Attempts to rejuvenate or page in the page to the mapping that took the page fault.

	@param aPc					Address of instruction causing the fault.
	@param aFaultAddress		Address of memory access which faulted.
	@param aFaultAsid			The asid of the faulting thread's process.
	@param aFaultIndex			Presumably the page index of the fault within aMemory — TODO confirm.
	@param aAccessPermissions	Bitmask of values from enum TAccessPermissions, which
								indicates the permissions required by faulting memory access.
	@param aMemory				The memory object involved in the fault — confirm against caller.
	@param aMapping				The mapping that took the page fault.
	@param aMapInstanceCount	The instance count of the mapping when it took the page fault.
	@param aThread				The thread that took the page fault.
	@param aExceptionInfo		The processor specific exception info.

	@return KErrNone if the page is now accessible, otherwise one of the system wide error codes.
	*/
	TInt HandlePageFault(	TLinAddr aPc, TLinAddr aFaultAddress, TUint aFaultAsid, TUint aFaultIndex,
							TUint aAccessPermissions, DMemoryObject* aMemory, DMemoryMapping* aMapping,
							TUint aMapInstanceCount, DThread* aThread, TAny* aExceptionInfo);


	/**
	Fault enumeration.
	NOTE(review): no fault codes are defined in this version of the header.
	*/
	enum TFault
		{
		};

	/**
	Fault the system.
	*/
	static void Fault(TFault aFault);

	/**
	Get state of live page list.
	*/
	void GetLiveListInfo(SVMCacheInfo& aInfo);

	/**
	Resize the live page list.
	*/
	TInt ResizeLiveList(TUint aMinimumPageCount, TUint aMaximumPageCount);

	/**
	Recalculate live list size.
	*/
	TInt ResizeLiveList();

	/**
	Flush (unmap) all memory which is demand paged.
	This reduces the live page list to a minimum.
	*/
	void FlushAll();

	/**
	Give pages to paging system for managing.
	*/
	void DonatePages(TUint aCount, TPhysAddr* aPages);

	/**
	Reclaim pages from paging system which were previously donated with DonatePages.

	@param aCount Number of pages.
	@param aPages Array of pages (as stored in an RPageArray).

	@return KErrNone if successful.
			KErrNoMemory if paging system doesn't have enough spare pages. This will leave some or all of the pages still managed by the pager.
			KErrNotFound if some of the pages were not actually being managed by the pager.
	*/
	TInt ReclaimPages(TUint aCount, TPhysAddr* aPages);

	/**
	Called by class Mmu whenever a page of RAM is freed. The page state will be EUnused.
	If the page was being used by the pager then this gives it the opportunity to update
	any internal state. If the pager wishes to retain ownership of the page then it must
	return the result KErrNone, any other value will cause the page to be returned to the
	systems free pool.
	*/
	TInt PageFreed(SPageInfo* aPageInfo);

	//
	// following public members for use by memory managers...
	//

	/**
	Allocate a number of RAM pages to store demand paged content.
	These pages are obtained from...

	1. An unused page in the live page list.
	2. The systems free pool.
	3. The oldest page from the live page list.
	*/
	TInt PageInAllocPages(TPhysAddr* aPages, TUint aCount, Mmu::TRamAllocFlags aAllocFlags);

	/**
	Free a number of RAM pages allocated by PageInAllocPages.
	*/
	void PageInFreePages(TPhysAddr* aPages, TUint aCount);

	/**
	Called to add a new page to the live list after a fault has occurred.

	@param aPageInfo		The page.

	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	void PagedIn(SPageInfo* aPageInfo);

	/**
	@param aPageInfo		The page.
	@param aPinArgs			Owner of a replacement page which will be used to substitute for the pinned page.

	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	void PagedInPinned(SPageInfo* aPageInfo, TPinArgs& aPinArgs);

	/**
	@pre MmuLock held
	@post MmuLock left unchanged.
	*/
	void PagedInUnneeded(SPageInfo* aPageInfo);

	/**
	@param aPageInfo		The page to unpin.
	@param aPinArgs			The resources used for pinning. The replacement pages allocated
							to this will be increased for each page which became completely
							unpinned.

	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	void Unpin(SPageInfo* aPageInfo, TPinArgs& aPinArgs);

	/**
	@param aPageInfo		The page to pin. Must be page being demand paged.
	@param aPinArgs			Owner of a replacement page which will be used to substitute for the pinned page.

	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	void Pin(SPageInfo* aPageInfo, TPinArgs& aPinArgs);


	/**
	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	void RejuvenatePageTable(TPte* aPt);

	/**
	NOTE(review): original doc was empty — presumably reserves pages for
	guaranteed locking use (see ReservePage/iReservePageCount); confirm
	against the implementation.
	*/
	TBool ReservePages(TUint aRequiredCount, TUint& aCount);

	/**
	NOTE(review): original doc was empty — presumably releases pages
	previously obtained via ReservePages; confirm against the implementation.
	*/
	void UnreservePages(TUint& aCount);

	/**
	Enumeration of instrumented paging events which only require the
	SPageInfo object as an argument.
	*/
	enum TEventSimple
		{
		EEventPageInNew,
		EEventPageInAgain,
		EEventPageInUnneeded,
		EEventPageInFree,
		EEventPageOut,
		EEventPageAged,
		EEventPagePin,
		EEventPageUnpin,
		EEventPageDonate,
		EEventPageReclaim,
		EEventPageAgedClean,
		EEventPageAgedDirty,
		EEventPagePageTableAlloc
		};

	/**
	Signal the occurrence of an event of type TEventSimple.
	*/
	void Event(TEventSimple aEvent, SPageInfo* aPageInfo);

	/**
	Enumeration of instrumented paging events which require the faulting address
	and program counter as arguments.
	*/
	enum TEventWithAddresses
		{
		EEventPageInStart,
		EEventPageRejuvenate
		};

	/**
	Signal the occurrence of an event of type TEventWithAddresses.
	*/
	void Event(TEventWithAddresses aEvent, SPageInfo* aPageInfo, TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions);

	/**
	Get the pager's event info data.
	*/
	void GetEventInfo(SVMEventInfo& aInfoOut);

	/**
	Reset the pager's event info data.
	*/
	void ResetEventInfo();

	/**
	Attempt to discard the specified page.

	@param aOldPageInfo	The page info of the page to discard.
	@param aBlockZoneId	The ID of the RAM zone not to allocate any required new page into.
	@param aBlockRest	Set to ETrue when we don't want the allocator to search for new pages if the RAM
						zone with ID==aBlockZoneId is encountered, i.e. a general RAM defrag operation.
	*/
	TInt DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest);


	/**
	Update any live list links to replace the old page with the new page.
	This is to be used when a page has been moved.

	@param aOldPageInfo	The page info of the page to replace.
	@param aNewPageInfo	The page info of the page to be used instead of the old page.
	*/
	void ReplacePage(SPageInfo& aOldPageInfo, SPageInfo& aNewPageInfo);


	//
	// following public members for use by TPinArgs...
	//

	/**
	NOTE(review): original doc was empty — presumably allocates replacement
	pages used while pinning; confirm against the implementation.
	*/
	TBool AllocPinReplacementPages(TUint aNumPages);

	/**
	NOTE(review): original doc was empty — presumably frees pages obtained
	via AllocPinReplacementPages; confirm against the implementation.
	*/
	void FreePinReplacementPages(TUint aNumPages);

private:
	/**
	Add a page to the head of the live page list. I.e. make it the 'youngest' page.

	@pre MmuLock held
	@post MmuLock left unchanged.
	*/
	void AddAsYoungestPage(SPageInfo* aPageInfo);

	/**
	Mark a page as type EUnused and add it to the end of the live page list.
	I.e. make it the 'oldest' page, so that it is the first page to be reused.

	@pre MmuLock held
	@post MmuLock left unchanged.
	*/
	void AddAsFreePage(SPageInfo* aPageInfo);

	/**
	Remove a page from the live page list.
	Its paged state is set to EUnpaged.

	@pre MmuLock held
	@post MmuLock left unchanged.
	*/
	void RemovePage(SPageInfo* aPageInfo);

	/**
	Remove the oldest page from the live page list and perform #StealPage.

	@pre MmuLock held
	@post MmuLock left unchanged.
	*/
	SPageInfo* StealOldestPage();

	/**
	Steal a page from the memory object (if any) which is using the page.
	If successful the returned page will be in the EUnknown state and the
	cache state for the page is indeterminate. This is the same state as
	if the page had been allocated by Mmu::AllocRam.

	@pre RamAlloc mutex held
	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	TInt StealPage(SPageInfo* aPageInfo);

	/**
	Restrict the access permissions for a page.

	@param aPageInfo	The page.
	@param aRestriction	The restriction type to apply.
	*/
	TInt RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction);

	/**
	Get a RAM page from the system's free pool and add it to the live list as a free page.

	@return False if out of memory;
			true otherwise, though new free page may still have already been used.

	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	TBool TryGrowLiveList();

	/**
	Get a RAM page from the system's free pool.

	@pre RamAllocLock held.

	@return The page or NULL if no page is available.
	*/
	SPageInfo* GetPageFromSystem(Mmu::TRamAllocFlags aAllocFlags, TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);

	/**
	Put a page back on the system's free pool.

	@pre RamAllocLock held.
	*/
	void ReturnPageToSystem();

	/**
	Put a specific page back on the system's free pool.

	@pre RamAllocLock held.
	*/
	void ReturnPageToSystem(SPageInfo& aPageInfo);

	/**
	Allocate a RAM page to store demand paged content.
	This tries to obtain a RAM from the following places:
	1. An unused page in the live page list.
	2. The systems free pool.
	3. The oldest page from the live page list.
	*/
	SPageInfo* PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags);

	/**
	If the number of young pages exceeds that specified by iYoungOldRatio then a
	single page is made 'old'. Call this after adding a new 'young' page.

	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	void BalanceAges();

	/**
	If HaveTooManyPages() then return them to the system.
	*/
	void RemoveExcessPages();

	/**
	@return True if pager has too many pages, false otherwise.
	*/
	TBool HaveTooManyPages();

	/**
	@return True if pager has its maximum number of pages, false otherwise.
	*/
	TBool HaveMaximumPages();

	/**
	Attempt to rejuvenate a page in which a page fault occurred.

	@param aOsAsid				Address space ID in which fault occurred.
	@param aAddress				Address of memory access which faulted.
	@param aAccessPermissions	Bitmask of values from enum TAccessPermissions, which
								indicates the permissions required by faulting memory access.
	@param aPc					Address of instruction causing the fault. (Used for tracing.)
	@param aMapping				The mapping that maps the page that took the fault.
	@param aMapInstanceCount	The instance count of the mapping when the page fault occurred.
	@param aThread				The thread that took the page fault.
	@param aExceptionInfo		The processor specific exception info.

	@return KErrNone if the page was remapped, KErrAbort if the mapping has been reused or detached,
	KErrNotFound if it may be possible to page in the page.
	*/
	TInt TryRejuvenate(	TInt aOsAsid, TLinAddr aAddress, TUint aAccessPermissions, TLinAddr aPc,
						DMemoryMappingBase* aMapping, TUint aMapInstanceCount, DThread* aThread,
						TAny* aExceptionInfo);

	/**
	Reserve one page for guaranteed locking use.
	Increments iReservePageCount if successful.

	@return True if operation was successful.
	*/
	TBool ReservePage();

	/**
	Called when a realtime thread takes a paging fault.
	Checks whether it's OK for the thread to take the fault.
	@return KErrNone if the paging fault should be further processed
	*/
	TInt CheckRealtimeThreadFault(DThread* aThread, TAny* aExceptionInfo);

	/**
	Attempt to find the page table entry and page info for a page in the specified mapping.

	@param aOsAsid				The OsAsid of the process that owns the mapping.
	@param aAddress				The linear address of the page.
	@param aMapping				The mapping that maps the linear address.
	@param aMapInstanceCount	The instance count of the mapping.
	@param[out] aPte			Will return a pointer to the page table entry for the page.
	@param[out] aPageInfo		Will return a pointer to the page info for the page.

	@return KErrNone on success, KErrAbort when the mapping is now invalid, KErrNotFound when
	the page table or page info can't be found.
	*/
	TInt PteAndInfoFromLinAddr(	TInt aOsAsid, TLinAddr aAddress, DMemoryMappingBase* aMapping,
								TUint aMapInstanceCount, TPte*& aPte, SPageInfo*& aPageInfo);
#ifdef _DEBUG
	/**
	Check consistency of live list.
	*/
	TBool CheckLists();

	/** Debug-only helper to trace the pager's page counts. */
	void TraceCounts();
#endif

private:
	TUint iMinYoungPages;		///< Minimum number of young pages in live list required for correct functioning.
	TUint iAbsoluteMinPageCount;///< Absolute minimum number of pages in live to meet algorithm constraints
private:
	TUint iMinimumPageCount;	/**< Minimum size for the live page list, including locked pages */
	TUint iMaximumPageCount;	/**< Maximum size for the live page list, including locked pages */
	TUint16 iYoungOldRatio;		/**< Ratio of young to old pages in the live page list */
	SDblQue iYoungList;			/**< Head of 'young' page list. */
	TUint iYoungCount;			/**< Number of young pages */
	SDblQue iOldList;			/**< Head of 'old' page list. */
	TUint iOldCount;			/**< Number of old pages */
#ifdef _USE_OLDEST_LISTS
	SDblQue iOldestCleanList;	/**< Head of 'oldestClean' page list. */
	TUint iOldestCleanCount;	/**< Number of 'oldestClean' pages */
	SDblQue iOldestDirtyList;	/**< Head of 'oldestDirty' page list. */
	TUint iOldestDirtyCount;	/**< Number of 'oldestDirty' pages */
	TUint16 iOldOldestRatio;	/**< Ratio of old pages to oldest to clean and dirty in the live page list*/
#endif
	TUint iNumberOfFreePages;	/**< Number of unused pages held on the live list. */
	TUint iNumberOfDirtyPages;	/**< The total number of dirty pages in the paging cache. Protected by MmuLock */
	TUint iInitMinimumPageCount;/**< Initial value for iMinimumPageCount */
	TUint iInitMaximumPageCount;/**< Initial value for iMaximumPageCount */
	TUint iReservePageCount;	/**< Number of pages reserved for locking */
	TUint iMinimumPageLimit;	/**< Minimum size for iMinimumPageCount, not including locked pages.
									 iMinimumPageCount >= iMinimumPageLimit + iReservePageCount */
	SVMEventInfo iEventInfo;	/**< Event counters updated by the Event() functions below. */

#ifdef __DEMAND_PAGING_BENCHMARKS__
public:
	void RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime);
	void ResetBenchmarkData(TPagingBenchmark aBm);
	SPagingBenchmarkInfo iBenchmarkInfo[EMaxPagingBm];
#endif //__DEMAND_PAGING_BENCHMARKS__
	};
/** The single system-wide pager instance. */
extern DPager ThePager;


#ifdef __DEMAND_PAGING_BENCHMARKS__

// Bracket a code sequence with these two macros to measure its duration
// using the fast counter; END_PAGING_BENCHMARK records the elapsed time
// against benchmark 'bm' via the global pager instance.
#define START_PAGING_BENCHMARK TUint32 _bmStart = NKern::FastCounter()
#define END_PAGING_BENCHMARK(bm) ThePager.RecordBenchmarkData(bm, _bmStart, NKern::FastCounter())

#else

// Benchmarking disabled: the macros compile away to nothing.
#define START_PAGING_BENCHMARK
#define END_PAGING_BENCHMARK(bm)
#endif // __DEMAND_PAGING_BENCHMARKS__

/**
Signal the occurrence of an event of type TEventSimple.
Emits a kernel trace (TRACEP) for every event and, when the relevant
BTrace category is compiled in, the corresponding BTrace record; also
updates the page-in read counter for EEventPageInNew.
*/
FORCE_INLINE void DPager::Event(TEventSimple aEvent, SPageInfo* aPageInfo)
	{
	switch(aEvent)
		{
	case EEventPageInNew:
		TRACEP(("DP: %O PageIn 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
#ifdef BTRACE_PAGING
		BTraceContext12(BTrace::EPaging,BTrace::EPagingPageIn,aPageInfo->PhysAddr(),aPageInfo->Owner(),aPageInfo->Index());
#endif
		++iEventInfo.iPageInReadCount;
		break;

	case EEventPageInAgain:
		TRACEP(("DP: %O PageIn (again) 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
#ifdef BTRACE_PAGING
		BTraceContext4(BTrace::EPaging,BTrace::EPagingMapPage,aPageInfo->PhysAddr());
#endif
		break;

	case EEventPageInUnneeded:
		TRACEP(("DP: %O PageIn (unneeded) 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
#ifdef BTRACE_PAGING
		BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
#endif
		break;

	case EEventPageInFree:
		TRACEP(("DP: %O PageInFree 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
#ifdef BTRACE_PAGING
		BTraceContext4(BTrace::EPaging,BTrace::EPagingPageInFree,aPageInfo->PhysAddr());
#endif
		break;

	case EEventPageOut:
		TRACEP(("DP: %O PageOut 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
#ifdef BTRACE_PAGING
		BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOut,aPageInfo->PhysAddr());
#endif
		break;

	case EEventPageAged:
		TRACEP(("DP: %O Aged 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
#ifdef BTRACE_PAGING_VERBOSE
		BTraceContext4(BTrace::EPaging,BTrace::EPagingAged,aPageInfo->PhysAddr());
#endif
		break;

	case EEventPagePin:
		TRACEP(("DP: %O Pin 0x%08x count=%d",TheCurrentThread,aPageInfo->PhysAddr(),aPageInfo->PinCount()));
#ifdef BTRACE_PAGING
		BTraceContext8(BTrace::EPaging,BTrace::EPagingPageLock,aPageInfo->PhysAddr(),aPageInfo->PinCount());
#endif
		break;

	case EEventPageUnpin:
		TRACEP(("DP: %O Unpin 0x%08x count=%d",TheCurrentThread,aPageInfo->PhysAddr(),aPageInfo->PinCount()));
#ifdef BTRACE_PAGING
		BTraceContext8(BTrace::EPaging,BTrace::EPagingPageUnlock,aPageInfo->PhysAddr(),aPageInfo->PinCount());
#endif
		break;

	case EEventPageDonate:
		TRACEP(("DP: %O Donate 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
#ifdef BTRACE_PAGING
		BTraceContext12(BTrace::EPaging,BTrace::EPagingDonatePage,aPageInfo->PhysAddr(),aPageInfo->Owner(),aPageInfo->Index());
#endif
		break;

	case EEventPageReclaim:
		TRACEP(("DP: %O Reclaim 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
#ifdef BTRACE_PAGING
		BTraceContext4(BTrace::EPaging,BTrace::EPagingReclaimPage,aPageInfo->PhysAddr());
#endif
		break;

	case EEventPageAgedClean:
		TRACEP(("DP: %O AgedClean 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
#ifdef BTRACE_PAGING_VERBOSE
		BTraceContext4(BTrace::EPaging,BTrace::EPagingAgedClean,aPageInfo->PhysAddr());
#endif
		break;

	case EEventPageAgedDirty:
		TRACEP(("DP: %O AgedDirty 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
#ifdef BTRACE_PAGING_VERBOSE
		BTraceContext4(BTrace::EPaging,BTrace::EPagingAgedDirty,aPageInfo->PhysAddr());
#endif
		break;

	case EEventPagePageTableAlloc:
		TRACEP(("DP: %O PageTableAlloc 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
#ifdef BTRACE_PAGING
		BTraceContext4(BTrace::EPaging,BTrace::EPagingPageTableAlloc,aPageInfo->PhysAddr());
#endif
		break;

	default:
		// Unknown event value — programming error.
		__NK_ASSERT_DEBUG(0);
		break;
		}
	}

/**
Signal the occurrence of an event of type TEventWithAddresses.
Emits a kernel trace (TRACEP), an optional BTrace record, and increments
the page fault counter for both event types.
*/
FORCE_INLINE void DPager::Event(TEventWithAddresses aEvent, SPageInfo* aPageInfo, TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions)
	{
	switch(aEvent)
		{
	case EEventPageInStart:
		TRACEP(("DP: %O HandlePageFault 0x%08x 0x%08x %d",TheCurrentThread,aFaultAddress,aPc,aAccessPermissions));
#ifdef BTRACE_PAGING
		BTraceContext12(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aPc,aAccessPermissions);
#endif
		++iEventInfo.iPageFaultCount;
		break;

	case EEventPageRejuvenate:
		TRACEP(("DP: %O Rejuvenate 0x%08x 0x%08x 0x%08x %d",TheCurrentThread,aPageInfo->PhysAddr(),aFaultAddress,aPc,aAccessPermissions));
#ifdef BTRACE_PAGING
		BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,aPageInfo->PhysAddr(),aFaultAddress,aPc);
#endif
		++iEventInfo.iPageFaultCount;
		break;

	default:
		// Unknown event value — programming error.
		__NK_ASSERT_DEBUG(0);
		break;
		}
	}

/**
Multiplier for number of request objects in pool per drive that supports paging.
*/
const TInt KPagingRequestsPerDevice = 2;


// Forward declarations for the paging-request classes defined below.
class DPagingRequest;
class DPageReadRequest;
class DPageWriteRequest;

/**
A pool of paging requests for use by a single paging device.
Holds one group of page-read requests and one group of page-write requests.
*/
class DPagingRequestPool : public DBase
	{
public:
	DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest);
	DPageReadRequest* AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	DPageWriteRequest* AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
private:
	// Private and unimplemented-for-callers: the pool is not deleted through
	// public code paths.
	~DPagingRequestPool();
private:
	/** A set of interchangeable request objects plus a free list. */
	class TGroup
		{
	public:
		TGroup(TUint aNumRequests);
		DPagingRequest* FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
		DPagingRequest* GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
		void Signal(DPagingRequest* aRequest);
	public:
		TUint iNumRequests;			///< Number of entries in iRequests.
		DPagingRequest** iRequests;	///< Array of all request objects in this group.
		SDblQue iFreeList;			///< Requests not currently in use.
		};
	TGroup iPageReadRequests;	///< Requests used for page-in.
	TGroup iPageWriteRequests;	///< Requests used for page-out.

	friend class DPagingRequest;
	friend class DPageReadRequest;
	friend class DPageWriteRequest;
	};

/**
Resources needed to service a paging request.
Base class for DPageReadRequest and DPageWriteRequest.
*/
class DPagingRequest : public SDblQueLink
	{
public:
	DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup);
	void Release();
	void Wait();
	void Signal();
	void SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	TBool CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	TBool IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	TLinAddr MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages);
	void UnmapPages(TBool aIMBRequired);
public:
	TThreadMessage iMessage;	/**< Used by the media driver to queue requests */
	DMutex* iMutex;				/**< A mutex for synchronisation and priority inheritance. */
	TInt iUsageCount;			/**< How many threads are using or waiting for this object. */
	TLinAddr iBuffer;			/**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/
protected:
	Mmu::TTempMapping iTempMapping;	///< Temporary mapping used while servicing the request.
private:
	DPagingRequestPool::TGroup& iPoolGroup;	///< The pool group this request belongs to.
	// used to identify the memory region this request is currently used for...
	DMemoryObject* iUseRegionMemory;
	TUint iUseRegionIndex;
	TUint iUseRegionCount;
	};

/**
Resources needed to service a page in request.
*/
class DPageReadRequest : public DPagingRequest
	{
public:
	inline DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup)
		: DPagingRequest(aPoolGroup)
		{}
	TInt Construct();
	enum
		{
		EMaxPages = 4	///< Maximum number of pages covered by a single read request.
		};
	static TUint ReservedPagesRequired();
private:
	~DPageReadRequest(); // can't delete
public:
	// NOTE(review): this member shadows DPagingRequest::iBuffer (same name
	// and same comment in the base class) — confirm which one is actually
	// used and whether the duplication is intentional.
	TLinAddr iBuffer; /**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/
private:
	DMemoryObject* iMemory;	///< Presumably the memory object being read into — confirm against implementation.
private:
	static TInt iAllocNext;	///< Presumably a running count of constructed requests; sizes ReservedPagesRequired() — confirm in Construct().
	};

/**
Total number of pages which must be held in reserve for page-read
requests: EMaxPages for each request object counted by iAllocNext.
*/
FORCE_INLINE TUint DPageReadRequest::ReservedPagesRequired()
	{
	return EMaxPages * iAllocNext;
	}

/**
Resources needed to service a page out request.
*/
class DPageWriteRequest : public DPagingRequest
	{
public:
	inline DPageWriteRequest(DPagingRequestPool::TGroup& aPoolGroup)
		: DPagingRequest(aPoolGroup)
		{}
	TInt Construct();
	enum
		{
		EMaxPages = 1	///< A write request covers a single page.
		};
private:
	~DPageWriteRequest(); // can't delete
private:
	static TInt iAllocNext;	///< Presumably a running count of constructed requests (cf. DPageReadRequest) — confirm in Construct().
	};


#endif // MPAGER_H