author      Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
date        Fri, 17 Sep 2010 08:37:04 +0300
changeset   266:0008ccd16016
parent      201:43365a9b78a3
permissions -rw-r--r--
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

/**
 @file
 @internalComponent
*/

#ifndef __MMU_H__
#define __MMU_H__

#include "mm.h"
#include "mmboot.h"
#include <mmtypes.h>
#include <kern_priv.h>


class DCoarseMemory;
class DMemoryObject;
class DMemoryMapping;

/**
A page information structure giving the current use and state for a
RAM page being managed by the kernel.

Any modification to the contents of any SPageInfo structure requires the
#MmuLock to be held. The exception to this is when a page is unused (#Type()==#EUnused);
in this case only the #RamAllocLock is required to use #SetAllocated(), #SetUncached()
and #CacheInvalidateCounter().

These structures are stored in an array at the virtual address #KPageInfoLinearBase
which is indexed by the physical address of the page they are associated with, divided
by #KPageSize. The memory for this array is allocated by the bootstrap and it has
unallocated regions where no memory is required to store SPageInfo structures.
These unallocated memory regions are indicated by zeros in the bitmap stored at
#KPageInfoMap.
*/
struct SPageInfo
	{
	/**
	Enumeration for the usage of a RAM page. This is stored in #iType.
	*/
	enum TType
		{
		/**
		No physical RAM exists for this page.

		This represents memory which doesn't exist or is not part of the physical
		address range being managed by the kernel.
		*/
		EInvalid,

		/**
		RAM fixed at boot time.

		This is for memory which was allocated by the bootstrap and which
		the kernel does not actively manage.
		*/
		EFixed,

		/**
		Page is unused.

		The page is either free memory in Mmu::iRamPageAllocator or on the demand
		paging 'live' list.

		To change from or to this type the #RamAllocLock must be held.
		*/
		EUnused,

		/**
		Page is in an indeterminate state.

		A page is placed into this state by Mmu::PagesAllocated when it is
		allocated (ceases to be #EUnused). Once the page has been assigned to
		its new use its type will be updated.
		*/
		EUnknown,

		/**
		Page was allocated with Mmu::AllocPhysicalRam, Mmu::ClaimPhysicalRam
		or is part of a reserved RAM bank set at system boot.
		*/
		EPhysAlloc,

		/**
		Page is owned by a memory object.

		#iOwner will point to the owning memory object and #iIndex will
		be the page index into its memory for this page.
		*/
		EManaged,

		/**
		Page is being used as a shadow page.

		@see DShadowPage.
		*/
		EShadow
		};


	/**
	Flags stored in #iFlags.

	The least significant bits of these flags are used for the #TMemoryAttributes
	value for the page.
	*/
	enum TFlags
		{
		// lower bits hold TMemoryAttribute value for this page

		/**
		Flag set to indicate that the page has writable mappings.
		(This is to facilitate demand paged memory.)
		*/
		EWritable = 1<<(EMemoryAttributeShift),

		/**
		Flag set to indicate that the memory page contents may differ
		from those previously saved to backing store (the contents are 'dirty').
		This is set whenever a page gains a writeable mapping and is only ever
		cleared once a demand paging memory manager 'cleans' the page.
		*/
		EDirty = 1<<(EMemoryAttributeShift+1)
		};
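
	// A sketch of how the attribute bits could be read back (an assumption from
	// the flag layout above, not something this header defines):
	//   TMemoryAttributes attr = (TMemoryAttributes)(Flags()&((1<<EMemoryAttributeShift)-1));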

	/**
	State for the page when being used to contain demand paged content.
	*/
	enum TPagedState
		{
		/**
		Page is not being managed for demand paging purposes, or has been transiently
		removed from the demand paging live list.
		*/
		EUnpaged = 0x0,

		/**
		Page is in the live list as a young page.
		*/
		EPagedYoung = 0x1,

		/**
		Page is in the live list as an old page.
		*/
		EPagedOld = 0x2,

		/**
		Page was pinned and has been moved but not yet freed.
		*/
		EPagedPinnedMoved = 0x3,

		/**
		Page has been removed from the live list to prevent its contents being paged out.
		*/
		// NOTE - This must be the same value as EStatePagedLocked as defined in mmubase.h
		EPagedPinned = 0x4,

		/**
		Page is in the live list as one of the oldest pages and is clean.
		*/
		EPagedOldestClean = 0x5,

		/**
		Page is in the live list as one of the oldest pages and is dirty.
		*/
		EPagedOldestDirty = 0x6
		};
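
	// A typical live-list lifecycle, as a sketch inferred from the states above
	// and from #SetOldestPage (not an exhaustive state machine):
	//   EUnpaged -> EPagedYoung -> EPagedOld -> EPagedOldestClean or EPagedOldestDirty
	// with EPagedPinned entered from a live state while mappings pin the page.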


	/**
	Additional flags, stored in #iFlags2.
	*/
	enum TFlags2
		{
		/**
		When #iPagedState==#EPagedPinned this indicates the page is a 'reserved' page
		and does not increase the free page count when returned to the live list.
		*/
		EPinnedReserve = 1<<0,
		};

private:
	/**
	Value from enum #TType, returned by #Type().
	*/
	TUint8 iType;

	/**
	Bitmask of values from #TFlags, returned by #Flags().
	*/
	TUint8 iFlags;

	/**
	Value from enum #TPagedState, returned by #PagedState().
	*/
	TUint8 iPagedState;

	/**
	Bitmask of values from #TFlags2.
	*/
	TUint8 iFlags2;

	union
		{
		/**
		The memory object which owns this page.
		Always set for #EManaged pages and can be set for #EPhysAlloc pages.
		*/
		DMemoryObject* iOwner;

		/**
		A pointer to the SPageInfo of the page that is being shadowed.
		For use with #EShadow pages only.
		*/
		SPageInfo* iOriginalPageInfo;
		};

	/**
	The index for this page within the owning object's (#iOwner) memory.
	*/
	TUint32 iIndex;

	/**
	Pointer identifying the current modifier of the page. See #SetModifier.
	*/
	TAny* iModifier;

	/**
	Storage location for data specific to the memory manager object handling this page.
	See #SetPagingManagerData.
	*/
	TUint32 iPagingManagerData;

	/**
	Union of values which vary depending on the current value of #iType.
	*/
	union
		{
		/**
		When #iType==#EPhysAlloc, this stores a count of the number of memory objects
		this page has been added to.
		*/
		TUint32 iUseCount;

		/**
		When #iType==#EUnused, this stores the value of Mmu::iCacheInvalidateCounter
		at the time the page was freed. This is used for some cache maintenance optimisations.
		*/
		TUint32 iCacheInvalidateCounter;

		/**
		When #iType==#EManaged, this holds the count of the number of times the page was pinned.
		This will only be non-zero for demand paged memory.
		*/
		TUint32 iPinCount;
		};

public:
	/**
	Used for placing the page into linked lists, e.g. the various demand paging live lists.
	*/
	SDblQueLink iLink;

public:
	/**
	Return the SPageInfo for a given page of physical RAM.
	*/
	static SPageInfo* FromPhysAddr(TPhysAddr aAddress);

	/**
	Return the SPageInfo for a given page of physical RAM.
	If the address has no SPageInfo, then a null pointer is returned.
	*/
	static SPageInfo* SafeFromPhysAddr(TPhysAddr aAddress);

	/**
	Return the physical address of the RAM page with which this SPageInfo object is associated.
	*/
	FORCE_INLINE TPhysAddr PhysAddr();

	/**
	Return an SPageInfo by conversion from the address of its embedded link member #iLink.
	*/
	FORCE_INLINE static SPageInfo* FromLink(SDblQueLink* aLink)
		{
		return _LOFF(aLink, SPageInfo, iLink);
		}
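
	// Example (a sketch; 'pagedList' is a hypothetical SDblQue of pages linked
	// through #iLink, such as a demand paging live list):
	//   SDblQueLink* link = pagedList.First();
	//   SPageInfo* pageInfo = SPageInfo::FromLink(link);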

	//
	// Getters...
	//

	/**
	Return the current #TType value stored in #iType.
	@pre #MmuLock held.
	*/
	FORCE_INLINE TType Type()
		{
		CheckAccess("Type");
		return (TType)iType;
		}

	/**
	Return the current value of #iFlags.
	@pre #MmuLock held (if \a aNoCheck false).
	*/
	FORCE_INLINE TUint Flags(TBool aNoCheck=false)
		{
		if(!aNoCheck)
			CheckAccess("Flags");
		return iFlags;
		}

	/**
	Return the current value of #iPagedState.
	@pre #MmuLock held.
	*/
	FORCE_INLINE TPagedState PagedState()
		{
		CheckAccess("PagedState");
		return (TPagedState)iPagedState;
		}

	/**
	Return the current value of #iOwner.
	@pre #MmuLock held.
	*/
	FORCE_INLINE DMemoryObject* Owner()
		{
		CheckAccess("Owner");
		return iOwner;
		}

	/**
	Return the current value of #iIndex.
	@pre #MmuLock held (if \a aNoCheck false).
	*/
	FORCE_INLINE TUint32 Index(TBool aNoCheck=false)
		{
		if(!aNoCheck)
			CheckAccess("Index");
		return iIndex;
		}

	/**
	Return the current value of #iModifier.
	@pre #MmuLock held.
	*/
	FORCE_INLINE TAny* Modifier()
		{
		CheckAccess("Modifier");
		return iModifier;
		}


	//
	// Setters...
	//

	/**
	Set this page as type #EFixed.
	This is only used during boot by Mmu::Init2Common.
	*/
	inline void SetFixed(TUint32 aIndex=0)
		{
		CheckAccess("SetFixed");
		Set(EFixed,0,aIndex);
		}

	/**
	Set this page as type #EUnused.

	@pre #MmuLock held.
	@pre #RamAllocLock held if the previous page type != #EUnknown.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetUnused()
		{
		CheckAccess("SetUnused",ECheckNotUnused|((iType!=EUnknown)?(TInt)ECheckRamAllocLock:0));
		iType = EUnused;
		iModifier = 0;
		// Do not modify iFlags or iIndex in this function; cache cleaning operations
		// performed during page allocation rely on these values.
		}

	/**
	Set this page as type #EUnknown.
	This is only used by Mmu::PagesAllocated.

	@pre #RamAllocLock held.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetAllocated()
		{
		CheckAccess("SetAllocated",ECheckUnused|ECheckRamAllocLock|ENoCheckMmuLock);
		iType = EUnknown;
		iModifier = 0;
		// Do not modify iFlags or iIndex in this function; cache cleaning operations
		// rely on these values.
		}

	/**
	Set this page as type #EPhysAlloc.
	@param aOwner Optional value for #iOwner.
	@param aIndex Optional value for #iIndex.

	@pre #MmuLock held.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetPhysAlloc(DMemoryObject* aOwner=0, TUint32 aIndex=0)
		{
		CheckAccess("SetPhysAlloc");
		Set(EPhysAlloc,aOwner,aIndex);
		iUseCount = 0;
		}

	/**
	Set this page as type #EManaged.

	@param aOwner Value for #iOwner.
	@param aIndex Value for #iIndex.
	@param aFlags Value for #iFlags (aOwner->PageInfoFlags()).

	@pre #MmuLock held.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetManaged(DMemoryObject* aOwner, TUint32 aIndex, TUint8 aFlags)
		{
		CheckAccess("SetManaged");
		Set(EManaged,aOwner,aIndex);
		iFlags = aFlags;
		iPinCount = 0;
		}

	/**
	Set this page as type #EShadow.

	This is for use by #DShadowPage.

	@param aIndex Value for #iIndex.
	@param aFlags Value for #iFlags.

	@pre #MmuLock held.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetShadow(TUint32 aIndex, TUint8 aFlags)
		{
		CheckAccess("SetShadow");
		Set(EShadow,0,aIndex);
		iFlags = aFlags;
		}

	/**
	Store a pointer to the SPageInfo of the page that this page is shadowing.

	@param aOrigPageInfo Pointer to the SPageInfo that this page is shadowing.

	@pre #MmuLock held.
	*/
	inline void SetOriginalPage(SPageInfo* aOrigPageInfo)
		{
		CheckAccess("SetOriginalPage");
		__NK_ASSERT_DEBUG(iType == EShadow);
		__NK_ASSERT_DEBUG(!iOriginalPageInfo);
		iOriginalPageInfo = aOrigPageInfo;
		}

	/**
	Returns a pointer to the SPageInfo of the page that this page is shadowing.

	@return A pointer to the SPageInfo that this page is shadowing.

	@pre #MmuLock held.
	*/
	inline SPageInfo* GetOriginalPage()
		{
		CheckAccess("GetOriginalPage");
		__NK_ASSERT_DEBUG(iType == EShadow);
		__NK_ASSERT_DEBUG(iOriginalPageInfo);
		return iOriginalPageInfo;
		}

private:
	/** Internal implementation factor for methods which set page type. */
	FORCE_INLINE void Set(TType aType, DMemoryObject* aOwner, TUint32 aIndex)
		{
		CheckAccess("Set",ECheckNotAllocated|ECheckNotPaged);
		(TUint32&)iType = aType; // also clears iFlags, iFlags2 and iPagedState
		iOwner = aOwner;
		iIndex = aIndex;
		iModifier = 0;
		}

public:


	//
	//
	//

	/**
	Set #iFlags to indicate that the contents of this page have been removed from
	any caches.

	@pre #MmuLock held if #iType!=#EUnused, #RamAllocLock held if #iType==#EUnused.
	*/
	FORCE_INLINE void SetUncached()
		{
		CheckAccess("SetUncached",iType==EUnused ? ECheckRamAllocLock|ENoCheckMmuLock : 0);
		__NK_ASSERT_DEBUG(iType==EUnused || (iType==EPhysAlloc && iUseCount==0));
		iFlags = EMemAttNormalUncached;
		}

	/**
	Set memory attributes and colour for a page of type #EPhysAlloc.

	This is set the first time a page of type #EPhysAlloc is added to a memory
	object with DMemoryManager::AddPages or DMemoryManager::AddContiguous.
	The values set are used to check that constraints are met if the page is
	also added to other memory objects.

	@param aIndex The page index within a memory object at which this page
	              has been added. This is stored in #iIndex and used to determine
	              the page's 'colour'.
	@param aFlags Value for #iFlags. This sets the memory attributes for the page.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetMapped(TUint32 aIndex, TUint aFlags)
		{
		CheckAccess("SetMapped");
		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
		__NK_ASSERT_DEBUG(iUseCount==0); // check page not already added to an object
		iIndex = aIndex;
		iFlags = aFlags;
		iModifier = 0;
		}

	/**
	Set #iPagedState.

	@pre #MmuLock held.

	@post #iModifier==0 to indicate that page state has changed.
	*/
	FORCE_INLINE void SetPagedState(TPagedState aPagedState)
		{
		CheckAccess("SetPagedState");
		__NK_ASSERT_DEBUG(aPagedState==iPagedState || iPagedState!=EPagedPinned || iPinCount==0); // make sure we don't set an unpinned state while iPinCount!=0
		iPagedState = aPagedState;
		iModifier = 0;
		}

	/**
	Mark this page as an oldest old page.

	This does not mark the page as modified as conceptually it is still an oldest page.
	This means that if a page goes from young -> old -> oldest the second transition
	will not interrupt the page restriction that happens on the first.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetOldestPage(TPagedState aPagedState)
		{
		CheckAccess("SetOldestPage");
		__NK_ASSERT_DEBUG(iPagedState==EPagedOld);
		__NK_ASSERT_DEBUG(aPagedState==EPagedOldestClean || aPagedState==EPagedOldestDirty);
		iPagedState = aPagedState;
		}

	/**
	Set the page's #iModifier value.

	#iModifier is cleared to zero whenever the usage or paging state of the page
	changes. So if a thread sets this to a suitable unique value (e.g. the address
	of a local variable) then it may perform a long running operation on the page
	and later check with #CheckModified that no other thread has changed the page
	state or used SetModifier in the intervening time.
	Example:

	@code
	TInt anyLocalVariable; // arbitrary local variable

	MmuLock::Lock();
	SPageInfo* thePageInfo = GetAPage();
	thePageInfo->SetModifier(&anyLocalVariable); // use &anyLocalVariable as value unique to this thread
	MmuLock::Unlock();

	DoOperation(thePageInfo);

	MmuLock::Lock();
	TInt r;
	if(!thePageInfo->CheckModified(&anyLocalVariable))
		{
		// nobody else touched the page...
		OperationSucceeded(thePageInfo);
		r = KErrNone;
		}
	else
		{
		// somebody else changed our page...
		OperationInterrupted(thePageInfo);
		r = KErrAbort;
		}
	MmuLock::Unlock();

	return r;
	@endcode

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetModifier(TAny* aModifier)
		{
		CheckAccess("SetModifier");
		iModifier = aModifier;
		}

	/**
	Return true if the #iModifier value does not match a specified value.

	@param aModifier A 'modifier' value previously set with #SetModifier.

	@pre #MmuLock held.

	@see SetModifier.
	*/
	FORCE_INLINE TBool CheckModified(TAny* aModifier)
		{
		CheckAccess("CheckModified");
		return iModifier!=aModifier;
		}

	/**
	Flag this page as having Page Table Entries which give writeable access permissions.
	This sets flags #EWritable and #EDirty.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetWritable()
		{
		CheckAccess("SetWritable");
		// This should only be invoked on paged pages.
		__NK_ASSERT_DEBUG(PagedState() != EUnpaged);
		iFlags |= EWritable;
		SetDirty();
		}

	/**
	Flag this page as no longer having any Page Table Entries which give writeable
	access permissions.
	This clears the flag #EWritable.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetReadOnly()
		{
		CheckAccess("SetReadOnly");
		iFlags &= ~EWritable;
		}

	/**
	Returns true if #SetWritable has been called without a subsequent #SetReadOnly.
	This returns the flag #EWritable.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TBool IsWritable()
		{
		CheckAccess("IsWritable");
		return iFlags&EWritable;
		}

	/**
	Flag this page as 'dirty', indicating that its contents may no longer match those saved
	to a backing store. This sets the flag #EDirty.

	This is used in the management of demand paged memory.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetDirty()
		{
		CheckAccess("SetDirty");
		__NK_ASSERT_DEBUG(IsWritable());
		iFlags |= EDirty;
		}

	/**
	Flag this page as 'clean', indicating that its contents now match those saved
	to a backing store. This clears the flag #EDirty.

	This is used in the management of demand paged memory.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetClean()
		{
		CheckAccess("SetClean");
		__NK_ASSERT_DEBUG(!IsWritable());
		iFlags &= ~EDirty;
		}

	/**
	Return the #EDirty flag. See #SetDirty and #SetClean.

	This is used in the management of demand paged memory.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TBool IsDirty()
		{
		CheckAccess("IsDirty");
		return iFlags&EDirty;
		}


	//
	// Type specific...
	//

	/**
	Set #iCacheInvalidateCounter to the specified value.

	@pre #MmuLock held.
	@pre #iType==#EUnused.
	*/
	void SetCacheInvalidateCounter(TUint32 aCacheInvalidateCounter)
		{
		CheckAccess("SetCacheInvalidateCounter");
		__NK_ASSERT_DEBUG(iType==EUnused);
		iCacheInvalidateCounter = aCacheInvalidateCounter;
		}

	/**
	Return #iCacheInvalidateCounter.

	@pre #RamAllocLock held.
	@pre #iType==#EUnused.
	*/
	TUint32 CacheInvalidateCounter()
		{
		CheckAccess("CacheInvalidateCounter",ECheckRamAllocLock|ENoCheckMmuLock);
		__NK_ASSERT_DEBUG(iType==EUnused);
		return iCacheInvalidateCounter;
		}

	/**
	Increment #iUseCount to indicate that the page has been added to a memory object.

	@return New value of #iUseCount.

	@pre #MmuLock held.
	@pre #iType==#EPhysAlloc.
	*/
	TUint32 IncUseCount()
		{
		CheckAccess("IncUseCount");
		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
		return ++iUseCount;
		}

	/**
	Decrement #iUseCount to indicate that the page has been removed from a memory object.

	@return New value of #iUseCount.

	@pre #MmuLock held.
	@pre #iType==#EPhysAlloc.
	*/
	TUint32 DecUseCount()
		{
		CheckAccess("DecUseCount");
		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
		__NK_ASSERT_DEBUG(iUseCount);
		return --iUseCount;
		}

	/**
	Return #iUseCount, which indicates the number of times the page has been added to memory object(s).

	@return #iUseCount.

	@pre #MmuLock held.
	@pre #iType==#EPhysAlloc.
	*/
	TUint32 UseCount()
		{
		CheckAccess("UseCount");
		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
		return iUseCount;
		}

	/**
	Increment #iPinCount to indicate that a mapping has pinned this page.
	This is only done for demand paged memory; unpaged memory does not have
	#iPinCount updated when it is pinned.

	@return New value of #iPinCount.

	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	TUint32 IncPinCount()
		{
		CheckAccess("IncPinCount");
		__NK_ASSERT_DEBUG(iType==EManaged);
		return ++iPinCount;
		}

	/**
	Decrement #iPinCount to indicate that a mapping which was pinning this page has been removed.
	This is only done for demand paged memory; unpaged memory does not have
	#iPinCount updated when it is unpinned.

	@return New value of #iPinCount.

	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	TUint32 DecPinCount()
		{
		CheckAccess("DecPinCount");
		__NK_ASSERT_DEBUG(iType==EManaged);
		__NK_ASSERT_DEBUG(iPinCount);
		return --iPinCount;
		}

	/**
	Clear #iPinCount to zero as this page is no longer pinned.
	This is only done for demand paged memory; unpaged memory does not have
	#iPinCount set.

	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	void ClearPinCount()
		{
		CheckAccess("ClearPinCount");
		__NK_ASSERT_DEBUG(iType==EManaged);
		__NK_ASSERT_DEBUG(iPinCount);
		iPinCount = 0;
		}

	/**
	Return #iPinCount, which indicates the number of mappings that have pinned this page.
	This is only valid for demand paged memory; unpaged memory does not have
	#iPinCount updated when it is pinned.

	@return #iPinCount.

	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	TUint32 PinCount()
		{
		CheckAccess("PinCount");
		__NK_ASSERT_DEBUG(iType==EManaged);
		return iPinCount;
		}

	/**
	Set the #EPinnedReserve flag.
	@pre #MmuLock held.
	@see EPinnedReserve.
	*/
	void SetPinnedReserve()
		{
		CheckAccess("SetPinnedReserve");
		iFlags2 |= EPinnedReserve;
		}

	/**
	Clear the #EPinnedReserve flag.
	@return True if the flag was set before the call.
	@pre #MmuLock held.
	@see EPinnedReserve.
	*/
	TBool ClearPinnedReserve()
		{
		CheckAccess("ClearPinnedReserve");
		TUint oldFlags2 = iFlags2;
		iFlags2 = oldFlags2&~EPinnedReserve;
		return oldFlags2&EPinnedReserve;
		}

	/**
	Set #iPagingManagerData to the specified value.
	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	void SetPagingManagerData(TUint32 aPagingManagerData)
		{
		CheckAccess("SetPagingManagerData");
		__NK_ASSERT_DEBUG(iType==EManaged);
		iPagingManagerData = aPagingManagerData;
		}

	/**
	Return #iPagingManagerData.
	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	TUint32 PagingManagerData()
		{
		CheckAccess("PagingManagerData");
		__NK_ASSERT_DEBUG(iType==EManaged);
		return iPagingManagerData;
		}

	//
	// Debug...
	//

private:
	enum TChecks
		{
		ECheckNotAllocated = 1<<0,
		ECheckNotUnused = 1<<1,
		ECheckUnused = 1<<2,
		ECheckNotPaged = 1<<3,
		ECheckRamAllocLock = 1<<4,
		ENoCheckMmuLock = 1<<5
		};
#ifdef _DEBUG
	void CheckAccess(const char* aMessage, TUint aFlags=0);
#else
	FORCE_INLINE void CheckAccess(const char* /*aMessage*/, TUint /*aFlags*/=0)
		{}
#endif

public:
#ifdef _DEBUG
	/**
	Debug function which outputs the contents of this object to the kernel debug port.
	*/
	void Dump();
#else
	FORCE_INLINE void Dump()
		{}
#endif
	};


const TInt KPageInfosPerPageShift = KPageShift-KPageInfoShift;
const TInt KPageInfosPerPage = 1<<KPageInfosPerPageShift;
const TInt KNumPageInfoPagesShift = 32-KPageShift-KPageInfosPerPageShift;
const TInt KNumPageInfoPages = 1<<KNumPageInfoPagesShift;

FORCE_INLINE SPageInfo* SPageInfo::FromPhysAddr(TPhysAddr aAddress)
	{
	return ((SPageInfo*)KPageInfoLinearBase)+(aAddress>>KPageShift);
	}

FORCE_INLINE TPhysAddr SPageInfo::PhysAddr()
	{
	return ((TPhysAddr)this)<<KPageInfosPerPageShift;
	}
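
// Worked example of the two functions above (a sketch; the concrete values are
// platform-defined and an assumption here — e.g. KPageShift==12 and a 32-byte
// SPageInfo give KPageInfosPerPageShift==7):
//   FromPhysAddr(a) == (SPageInfo*)KPageInfoLinearBase + (a>>KPageShift)
// PhysAddr() inverts this by shifting the SPageInfo address left by
// KPageInfosPerPageShift; this relies on KPageInfoLinearBase being aligned such
// that the shifted base term vanishes modulo 2^32 (inferred from the
// arithmetic, not stated in this header).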


/**
A page table information structure giving the current use and state for a
page table.
*/
struct SPageTableInfo
	{
public:

	/**
	Enumeration for the usage of a page table. This is stored in #iType.
	*/
	enum TType
		{
		/**
		Page table is unused (implementation assumes this enumeration == 0).
		@see #iUnused and #SPageTableInfo::TUnused.
		*/
		EUnused=0,

		/**
		Page table has undetermined use.
		(Either created by the bootstrap or is newly allocated but not yet assigned.)
		*/
		EUnknown=1,

		/**
		Page table is being used by a coarse memory object.
		@see #iCoarse and #SPageTableInfo::TCoarse.
		*/
		ECoarseMapping=2,

		/**
		Page table is being used for fine mappings.
		@see #iFine and #SPageTableInfo::TFine.
		*/
		EFineMapping=3
		};

private:

	/**
	Flags stored in #iFlags.
	*/
	enum TFlags
		{
		/**
		Page table is for mapping demand paged content.
		*/
		EDemandPaged = 1<<0,
		/**
		Page table is in the Page Table Allocator's cleanup list
		(only set for the first page table in a RAM page).
		*/
		EOnCleanupList = 1<<1,
		/**
		The page table cluster that this page table info refers to is currently allocated.
		*/
		EPtClusterAllocated = 1<<2
		};

	/**
	Value from enum #TType.
	*/
	TUint8 iType;

	/**
	Bitmask of values from #TFlags.
	*/
	TUint8 iFlags;

	/**
	Spare member used for padding.
	*/
	TUint16 iSpare2;

	/**
	Number of pages currently mapped by this page table.
	Normally, when #iPageCount==0 and #iPermanenceCount==0, the page table is freed.
	*/
	TUint16 iPageCount;

	/**
	Count of the number of uses of this page table which require it to be permanently
	allocated, even when it maps no pages (#iPageCount==0).
	*/
	TUint16 iPermanenceCount;

	/**
	Information about a page table when #iType==#EUnused.
	*/
	struct TUnused
		{
		/**
		Cast this object to a SDblQueLink reference.
		This is used for placing unused SPageTableInfo objects into free lists.
		*/
		FORCE_INLINE SDblQueLink& Link()
			{ return *(SDblQueLink*)this; }
	private:
		SDblQueLink* iNext; ///< Next free page table
		SDblQueLink* iPrev; ///< Previous free page table
		};

	/**
	Information about a page table when #iType==#ECoarseMapping.
	*/
	struct TCoarse
		{
		/**
		Memory object which owns this page table.
		*/
		DCoarseMemory* iMemoryObject;

		/**
		The index of the page table, i.e. the offset, in 'chunks',
		into the object's memory that the page table is being used to map.
		*/
		TUint16 iChunkIndex;

		/**
		The #TPteType the page table is being used for.
		*/
		TUint8 iPteType;
		};

	/**
	Information about a page table when #iType==#EFineMapping.
	*/
	struct TFine
		{
		/**
		Start of the virtual address region that this page table is currently
		mapping memory at, ORed with the OS ASID of the address space this lies in.
		*/
		TLinAddr iLinAddrAndOsAsid;
		};

	/**
	Union of type specific info.
	*/
	union
		{
		TUnused iUnused; ///< Information about a page table when #iType==#EUnused.
		TCoarse iCoarse; ///< Information about a page table when #iType==#ECoarseMapping.
		TFine iFine;     ///< Information about a page table when #iType==#EFineMapping.
		};

public:
	/**
	Return the SPageTableInfo for the page table in which a given PTE lies.
	*/
	static SPageTableInfo* FromPtPtr(TPte* aPtPte);

	/**
	Return the page table with which this SPageTableInfo is associated.
	*/
	TPte* PageTable();

	/**
	Used at boot time to initialise page tables which were allocated by the bootstrap.

	@param aCount The number of pages being mapped by this page table.
	*/
	FORCE_INLINE void Boot(TUint aCount)
		{
		CheckInit("Boot");
		iPageCount = aCount;
		iPermanenceCount = 1; // assume page table shouldn't be freed
		iType = EUnknown;
		iFlags = EPtClusterAllocated;
		}

	/**
	Initialise a page table after it has had memory allocated for it.

	@param aDemandPaged True if this page table has been allocated for use with
	                    demand paged memory.
	*/
	FORCE_INLINE void New(TBool aDemandPaged)
		{
		iType = EUnused;
		iFlags = EPtClusterAllocated | (aDemandPaged ? EDemandPaged : 0);
		}

	/**
	Return true if the page table cluster that this page table info refers to has
	been previously allocated.
	*/
	FORCE_INLINE TBool IsPtClusterAllocated()
		{
		return iFlags & EPtClusterAllocated;
		}

	/**
	The page table cluster that this page table info refers to has been freed.
	*/
	FORCE_INLINE void PtClusterFreed()
		{
		__NK_ASSERT_DEBUG(IsPtClusterAllocated());
		iFlags &= ~EPtClusterAllocated;
		}

	/**
	The page table cluster that this page table info refers to has been allocated.
	*/
	FORCE_INLINE void PtClusterAlloc()
		{
		__NK_ASSERT_DEBUG(!IsPtClusterAllocated());
		iFlags |= EPtClusterAllocated;
		}

	/**
	Initialise a page table to type #EUnknown after it has been newly allocated.

	@pre #PageTablesLockIsHeld.
	*/
	FORCE_INLINE void Init()
		{
		__NK_ASSERT_DEBUG(IsPtClusterAllocated());
		CheckInit("Init");
		iPageCount = 0;
		iPermanenceCount = 0;
		iType = EUnknown;
		}

	/**
	Increment #iPageCount to account for newly mapped pages.

	@param aStep Amount to add to #iPageCount. Default is one.

	@return New value of #iPageCount.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TUint IncPageCount(TUint aStep=1)
		{
		CheckAccess("IncPageCount");
		TUint count = iPageCount; // compiler handles half-word values stupidly, so give it a hand
		count += aStep;
		iPageCount = count;
		return count;
		}

	/**
	Decrement #iPageCount to account for removed pages.

	@param aStep Amount to subtract from #iPageCount. Default is one.

	@return New value of #iPageCount.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TUint DecPageCount(TUint aStep=1)
		{
		CheckAccess("DecPageCount");
		TUint count = iPageCount; // compiler handles half-word values stupidly, so give it a hand
		count -= aStep;
		iPageCount = count;
		return count;
		}

	/**
	Return #iPageCount.
	@pre #MmuLock held.
	*/
	FORCE_INLINE TUint PageCount()
		{
		CheckAccess("PageCount");
		return iPageCount;
		}

	/**
	Increment #iPermanenceCount to indicate a new use of this page table which
	requires it to be permanently allocated.

	@return New value of #iPermanenceCount.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TUint IncPermanenceCount()
		{
		CheckAccess("IncPermanenceCount");
		TUint count = iPermanenceCount; // compiler handles half-word values stupidly, so give it a hand
		++count;
		iPermanenceCount = count;
		return count;
		}

	/**
	Decrement #iPermanenceCount to indicate the removal of a use added by #IncPermanenceCount.

	@return New value of #iPermanenceCount.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TUint DecPermanenceCount()
		{
		CheckAccess("DecPermanenceCount");
		TUint count = iPermanenceCount; // compiler handles half-word values stupidly, so give it a hand
		__NK_ASSERT_DEBUG(count);
		--count;
		iPermanenceCount = count;
		return count;
		}

	/**
	Return #iPermanenceCount.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TUint PermanenceCount()
		{
		CheckAccess("PermanenceCount");
		return iPermanenceCount;
		}

	/**
	Set page table to the #EUnused state.
	This is only intended for use by #PageTableAllocator.

	@pre #MmuLock held and #PageTablesLockIsHeld.
	*/
	FORCE_INLINE void SetUnused()
		{
		CheckChangeUse("SetUnused");
		iType = EUnused;
		}

	/**
	Return true if the page table is in the #EUnused state.
	This is only intended for use by #PageTableAllocator.

	@pre #MmuLock held or #PageTablesLockIsHeld.
	*/
	FORCE_INLINE TBool IsUnused()
		{
		CheckCheckUse("IsUnused");
		return iType==EUnused;
		}

	/**
	Set page table as being used by a coarse memory object.

	@param aMemory     Memory object which owns this page table.
	@param aChunkIndex The index of the page table, i.e. the offset, in 'chunks',
	                   into the object's memory that the page table is being used to map.
	@param aPteType    The #TPteType the page table is being used for.

	@pre #MmuLock held and #PageTablesLockIsHeld.

	@see TCoarse.
	*/
	inline void SetCoarse(DCoarseMemory* aMemory, TUint aChunkIndex, TUint aPteType)
		{
		CheckChangeUse("SetCoarse");
		iPageCount = 0;
		iPermanenceCount = 0;
		iType = ECoarseMapping;
		iCoarse.iMemoryObject = aMemory;
		iCoarse.iChunkIndex = aChunkIndex;
		iCoarse.iPteType = aPteType;
		}

	/**
	Return true if this page table is currently being used by a coarse memory object
	matching the specified arguments.
	For arguments, see #SetCoarse.

	@pre #MmuLock held or #PageTablesLockIsHeld.
	*/
	inline TBool CheckCoarse(DCoarseMemory* aMemory, TUint aChunkIndex, TUint aPteType)
		{
		CheckCheckUse("CheckCoarse");
		return iType==ECoarseMapping
			&& iCoarse.iMemoryObject==aMemory
			&& iCoarse.iChunkIndex==aChunkIndex
			&& iCoarse.iPteType==aPteType;
		}

	/**
	Set page table as being used for fine mappings.

	@param aLinAddr Start of the virtual address region that the page table is
	                mapping memory at.
	@param aOsAsid  The OS ASID of the address space which \a aLinAddr lies in.

	@pre #MmuLock held and #PageTablesLockIsHeld.
	*/
	inline void SetFine(TLinAddr aLinAddr, TUint aOsAsid)
		{
		CheckChangeUse("SetFine");
		__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
		iPageCount = 0;
		iPermanenceCount = 0;
		iType = EFineMapping;
		iFine.iLinAddrAndOsAsid = aLinAddr|aOsAsid;
		}

	/**
	Return true if this page table is currently being used for fine mappings
	matching the specified arguments.
	For arguments, see #SetFine.

	@pre #MmuLock held or #PageTablesLockIsHeld.
	*/
	inline TBool CheckFine(TLinAddr aLinAddr, TUint aOsAsid)
		{
		CheckCheckUse("CheckFine");
		__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
		return iType==EFineMapping
			&& iFine.iLinAddrAndOsAsid==(aLinAddr|aOsAsid);
		}

	/**
	Set a previously unknown page table as now being used for fine mappings.
	This is used during the boot process by DFineMemory::ClaimInitialPages
	to initialise the state of a page table allocated by the bootstrap.

	@param aLinAddr Start of the virtual address region that the page table is
	                mapping memory at.
	@param aOsAsid  The OS ASID of the address space which \a aLinAddr lies in.
	                (This should be KKernelOsAsid.)

	@pre #MmuLock held and #PageTablesLockIsHeld.
	*/
	inline TBool ClaimFine(TLinAddr aLinAddr, TUint aOsAsid)
		{
		CheckChangeUse("ClaimFine");
		__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
		if(iType==EFineMapping)
			return CheckFine(aLinAddr,aOsAsid);
		if(iType!=EUnknown)
			return false;
		iType = EFineMapping;
		iFine.iLinAddrAndOsAsid = aLinAddr|aOsAsid;
		return true;
		}

	/**
	Return true if page table was allocated for use with demand paged memory.
	*/
	FORCE_INLINE TBool IsDemandPaged()
		{
		return iFlags&EDemandPaged;
		}

#ifdef _DEBUG
	/**
	Debug check returning true if the value of #iPageCount is consistent with
	the PTEs in this page table.

	@pre #MmuLock held.
	*/
	TBool CheckPageCount();
#endif

	/**
	Return a reference to an embedded SDblQueLink which is used for placing this
	SPageTableInfo object into free lists.
	@pre #PageTablesLockIsHeld.
	@pre #iType==#EUnused.
	*/
	inline SDblQueLink& FreeLink()
		{
		__NK_ASSERT_DEBUG(IsUnused());
		return iUnused.Link();
		}

	/**
	Return a pointer to a SPageTableInfo by conversion from the address
	of its embedded link as returned by #FreeLink.
	*/
	FORCE_INLINE static SPageTableInfo* FromFreeLink(SDblQueLink* aLink)
		{
		return _LOFF(aLink, SPageTableInfo, iUnused);
		}

	/**
	Return the SPageTableInfo for the first page table in the same
	physical ram page as the page table for this SPageTableInfo.
	*/
	FORCE_INLINE SPageTableInfo* FirstInPage()
		{
		return (SPageTableInfo*)(TLinAddr(this)&~(KPtClusterMask*sizeof(SPageTableInfo)));
		}

	/**
	Return the SPageTableInfo for the last page table in the same
	physical ram page as the page table for this SPageTableInfo.
	*/
	FORCE_INLINE SPageTableInfo* LastInPage()
		{
		return (SPageTableInfo*)(TLinAddr(this)|(KPtClusterMask*sizeof(SPageTableInfo)));
		}

	/**
	Return true if the page table for this SPageTableInfo is
	the first page table in the physical page it occupies.
	*/
	FORCE_INLINE TBool IsFirstInPage()
		{
		return (TLinAddr(this)&(KPtClusterMask*sizeof(SPageTableInfo)))==0;
		}
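
	// Sketch of the cluster arithmetic in the three functions above (assumptions:
	// KPtClusterMask is the page-tables-per-RAM-page count minus one, and
	// sizeof(SPageTableInfo) is a power of two — see the __ASSERT_COMPILE below):
	// KPtClusterMask*sizeof(SPageTableInfo) then forms a contiguous run of
	// address bits, so clearing those bits rounds down to the first info in the
	// page and setting them rounds up to the last.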

	/**
	Return true if this page table has been added to the cleanup list with
	#AddToCleanupList.
	Must only be used for page tables which return true for #IsFirstInPage.

	@pre #PageTablesLockIsHeld.
	*/
	FORCE_INLINE TBool IsOnCleanupList()
		{
		__NK_ASSERT_DEBUG(IsFirstInPage());
		return iFlags&EOnCleanupList;
		}

	/**
	Add the RAM page containing this page table to the specified cleanup list.
	Must only be used for page tables which return true for #IsFirstInPage.

	@pre #PageTablesLockIsHeld.
	*/
	FORCE_INLINE void AddToCleanupList(SDblQue& aCleanupList)
		{
		__NK_ASSERT_DEBUG(IsUnused());
		__NK_ASSERT_DEBUG(IsFirstInPage());
		__NK_ASSERT_DEBUG(!IsOnCleanupList());
		aCleanupList.Add(&FreeLink());
		iFlags |= EOnCleanupList;
		}

	/**
	Remove the RAM page containing this page table from a cleanup list it
	was added to with #AddToCleanupList.
	Must only be used for page tables which return true for #IsFirstInPage.

	@pre #PageTablesLockIsHeld.
	*/
	FORCE_INLINE void RemoveFromCleanupList()
		{
		__NK_ASSERT_DEBUG(IsUnused());
		__NK_ASSERT_DEBUG(IsFirstInPage());
		__NK_ASSERT_DEBUG(IsOnCleanupList());
		iFlags &= ~EOnCleanupList;
		FreeLink().Deque();
		}

	/**
	Remove this page table from its owner and free it.
	This is only used with page tables which map demand paged memory
	and is intended for use in implementing #DPageTableMemoryManager.

	@return KErrNone if successful,
	        otherwise one of the system wide error codes.

	@pre #MmuLock held and #PageTablesLockIsHeld.
	*/
	TInt ForcedFree();

private:

#ifdef _DEBUG
	void CheckChangeUse(const char* aName);
	void CheckCheckUse(const char* aName);
	void CheckAccess(const char* aName);
	void CheckInit(const char* aName);
#else
	FORCE_INLINE void CheckChangeUse(const char* /*aName*/)
		{}
	FORCE_INLINE void CheckCheckUse(const char* /*aName*/)
		{}
	FORCE_INLINE void CheckAccess(const char* /*aName*/)
		{}
	FORCE_INLINE void CheckInit(const char* /*aName*/)
		{}
#endif
	};


const TInt KPageTableInfoShift = 4;
__ASSERT_COMPILE(sizeof(SPageTableInfo)==(1<<KPageTableInfoShift));

FORCE_INLINE SPageTableInfo* SPageTableInfo::FromPtPtr(TPte* aPtPte)
	{
	TUint id = ((TLinAddr)aPtPte-KPageTableBase)>>KPageTableShift;
	return (SPageTableInfo*)KPageTableInfoBase+id;
	}

FORCE_INLINE TPte* SPageTableInfo::PageTable()
	{
	return (TPte*)
		(KPageTableBase+
			(
			((TLinAddr)this-(TLinAddr)KPageTableInfoBase)
			<<(KPageTableShift-KPageTableInfoShift)
			)
		);
	}
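
// Round-trip sketch for the two functions above (assumes page tables of
// 1<<KPageTableShift bytes laid out from KPageTableBase, with one 16-byte
// SPageTableInfo per table from KPageTableInfoBase; 'pte' is hypothetical):
//   TPte* pte = ...;                                      // any PTE within some page table
//   SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pte);
//   TPte* tableStart = pti->PageTable();                  // start of the table containing pte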


/**
Class providing access to the mutex used to protect memory allocation operations;
this is the mutex Mmu::iRamAllocatorMutex.
In addition to providing locking, these functions monitor the system's free RAM
levels and call K::CheckFreeMemoryLevel to notify the system of changes.
*/
class RamAllocLock
	{
public:
	/**
	Acquire the lock.
	The lock may be acquired multiple times by a thread, and will remain locked
	until #Unlock has been used enough times to balance this.
	*/
	static void Lock();

	/**
	Release the lock.

	@pre The current thread has previously acquired the lock.
	*/
	static void Unlock();

	/**
	Allow another thread to acquire the lock.
	This is equivalent to #Unlock followed by #Lock, but optimised
	to only do this if there is another thread waiting on the lock.

	@return True if the lock was released by this function.

	@pre The current thread has previously acquired the lock.
	*/
	static TBool Flash();

	/**
	Return true if the current thread holds the lock.
	This is used for debug checks.
	*/
	static TBool IsHeld();
	};
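
// Minimal usage sketch (DoRamAllocationWork is a hypothetical placeholder for
// code that manipulates the RAM allocator, e.g. Mmu::iRamPageAllocator):
//   RamAllocLock::Lock();
//   __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
//   DoRamAllocationWork();
//   RamAllocLock::Unlock(); // lock is freed once Lock/Unlock calls balance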
|
1622 |
||
1623 |
||
1624 |
||
1625 |
/** |
|
1626 |
Return true if the PageTableLock is held by the current thread. |
|
1627 |
This lock is the mutex used to protect page table allocation; it is acquired |
|
1628 |
with |
|
1629 |
@code |
|
1630 |
::PageTables.Lock(); |
|
1631 |
@endcode |
|
1632 |
and released with |
|
1633 |
@code |
|
1634 |
::PageTables.Unlock(); |
|
1635 |
@endcode |
|
1636 |
*/ |
|
1637 |
TBool PageTablesLockIsHeld(); |


/**
Class providing access to the fast mutex used to protect various
low level memory operations.

This lock must only be held for a very short and bounded time.
*/
class MmuLock
	{
public:
	/**
	Acquire the lock.
	*/
	static void Lock();

	/**
	Release the lock.

	@pre The current thread has previously acquired the lock.
	*/
	static void Unlock();

	/**
	Allow another thread to acquire the lock.
	This is equivalent to #Unlock followed by #Lock, but optimised
	to only do this if there is another thread waiting on the lock.

	@return True if the lock was released by this function.

	@pre The current thread has previously acquired the lock.
	*/
	static TBool Flash();

	/**
	Return true if the current thread holds the lock.
	This is used for debug checks.
	*/
	static TBool IsHeld();

	/**
	Increment a counter and perform the action of #Flash() once a given threshold
	value is reached. After flashing, the counter is reset.

	This is typically used in long running loops to periodically flash the lock
	and so avoid holding it for too long, e.g.

	@code
	MmuLock::Lock();
	TUint flash = 0;
	const TUint KMaxIterationsWithLock = 10;
	while(WorkToDo)
		{
		DoSomeWork();
		MmuLock::Flash(flash,KMaxIterationsWithLock); // flash every N loops
		}
	MmuLock::Unlock();
	@endcode

	@param aCounter			Reference to the counter.
	@param aFlashThreshold	Value \a aCounter must reach before flashing the lock.
	@param aStep			Value to add to \a aCounter.

	@return True if the lock was released by this function.

	@pre The current thread has previously acquired the lock.
	*/
	static FORCE_INLINE TBool Flash(TUint& aCounter, TUint aFlashThreshold, TUint aStep=1)
		{
		UnlockGuardCheck();
		if((aCounter+=aStep)<aFlashThreshold)
			return EFalse;
		aCounter -= aFlashThreshold;
		return MmuLock::Flash();
		}

	/**
	Begin a debug check to test that the MmuLock is not unlocked unexpectedly.

	This is used in situations where a series of operations must be performed
	atomically with the MmuLock held. It is usually used via the
	#__UNLOCK_GUARD_START macro, e.g.

	@code
	__UNLOCK_GUARD_START(MmuLock);
	SomeCode();
	SomeMoreCode();
	__UNLOCK_GUARD_END(MmuLock); // fault if MmuLock released by SomeCode or SomeMoreCode
	@endcode
	*/
	static FORCE_INLINE void UnlockGuardStart()
		{
#ifdef _DEBUG
		++UnlockGuardNest;
#endif
		}

	/**
	End a debug check testing that the MmuLock is not unlocked unexpectedly.
	This is usually used via the #__UNLOCK_GUARD_END macro, which faults if false is returned.

	@see UnlockGuardStart

	@return EFalse if the MmuLock was released between a previous #UnlockGuardStart
			and the call to this function.
	*/
	static FORCE_INLINE TBool UnlockGuardEnd()
		{
#ifdef _DEBUG
		__NK_ASSERT_DEBUG(UnlockGuardNest);
		--UnlockGuardNest;
		return UnlockGuardFail==0;
#else
		return ETrue;
#endif
		}

private:
	/**
	Executed whenever the lock is released to check that
	#UnlockGuardStart and #UnlockGuardEnd are balanced.
	*/
	static FORCE_INLINE void UnlockGuardCheck()
		{
#ifdef _DEBUG
		if(UnlockGuardNest)
			UnlockGuardFail = ETrue;
#endif
		}

public:
	/** The lock */
	static NFastMutex iLock;

#ifdef _DEBUG
private:
	static TUint UnlockGuardNest;
	static TUint UnlockGuardFail;
#endif
	};


/**
Interface for accessing the lock mutex being used to serialise
explicit modifications to a specified memory object.

The lock mutex is either the one previously assigned with
DMemoryObject::SetLock or, if none was set, a mutex dynamically
assigned from #MemoryObjectMutexPool; such a mutex has 'order'
#KMutexOrdMemoryObject.
*/
class MemoryObjectLock
	{
public:
	/**
	Acquire the lock for the specified memory object.
	If the object has no lock, one is assigned from #MemoryObjectMutexPool.
	*/
	static void Lock(DMemoryObject* aMemory);

	/**
	Release the lock for the specified memory object, which was acquired
	with #Lock. If the lock was one which was dynamically assigned, and there
	are no threads waiting for it, then the lock is unassigned from the memory
	object.
	*/
	static void Unlock(DMemoryObject* aMemory);

	/**
	Return true if the current thread holds the lock for the specified memory object.
	This is used for debug checks.
	*/
	static TBool IsHeld(DMemoryObject* aMemory);
	};
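
/*
A minimal usage sketch (assumed, not from the original source): serialising
an explicit modification to a memory object.

@code
	MemoryObjectLock::Lock(aMemory);	// aMemory: assumed DMemoryObject*
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
	// ... modify the memory object ...
	MemoryObjectLock::Unlock(aMemory);
@endcode
*/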


#define __UNLOCK_GUARD_START(_l) __DEBUG_ONLY(_l::UnlockGuardStart())
#define __UNLOCK_GUARD_END(_l) __NK_ASSERT_DEBUG(_l::UnlockGuardEnd())


const TUint KMutexOrdAddresSpace = KMutexOrdKernelHeap + 2;
const TUint KMutexOrdMemoryObject = KMutexOrdKernelHeap + 1;
const TUint KMutexOrdMmuAlloc = KMutexOrdRamAlloc + 1;


#ifdef _DEBUG
//#define FORCE_TRACE
//#define FORCE_TRACE2
//#define FORCE_TRACEB
//#define FORCE_TRACEP
#endif


#define TRACE_printf Kern::Printf

#define TRACE_ALWAYS(t) TRACE_printf t

#ifdef FORCE_TRACE
#define TRACE(t) TRACE_printf t
#else
#define TRACE(t) __KTRACE_OPT(KMMU2,TRACE_printf t)
#endif

#ifdef FORCE_TRACE2
#define TRACE2(t) TRACE_printf t
#else
#define TRACE2(t) __KTRACE_OPT(KMMU2,TRACE_printf t)
#endif

#ifdef FORCE_TRACEB
#define TRACEB(t) TRACE_printf t
#else
#define TRACEB(t) __KTRACE_OPT2(KMMU,KBOOT,TRACE_printf t)
#endif

#ifdef FORCE_TRACEP
#define TRACEP(t) TRACE_printf t
#else
#define TRACEP(t) __KTRACE_OPT(KPAGING,TRACE_printf t)
#endif


/**
The maximum number of consecutive updates to #SPageInfo structures which
should be executed without releasing the #MmuLock.

This value must be an integer power of two.
*/
const TUint KMaxPageInfoUpdatesInOneGo = 64;

/**
The maximum number of simple operations on memory page state which should
occur without releasing the #MmuLock. Examples of such operations are a
read-modify-write of a Page Table Entry (PTE) or of entries in a memory
object's RPageArray.

This value must be an integer power of two.
*/
const TUint KMaxPagesInOneGo = KMaxPageInfoUpdatesInOneGo/2;

/**
The maximum number of Page Directory Entries which should be updated
without releasing the #MmuLock.

This value must be an integer power of two.
*/
const TUint KMaxPdesInOneGo = KMaxPageInfoUpdatesInOneGo;
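
/*
A minimal sketch (assumed, not from the original source) of how these
constants combine with MmuLock::Flash in a batched page loop.

@code
	MmuLock::Lock();
	TUint flash = 0;
	for(TUint i=0; i<aCount; ++i)	// aCount, aPages: assumed inputs
		{
		UpdatePageState(aPages[i]);	// hypothetical per-page operation
		MmuLock::Flash(flash,KMaxPagesInOneGo);	// lock released every KMaxPagesInOneGo pages
		}
	MmuLock::Unlock();
@endcode
*/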


/********************************************
 * MMU stuff
 ********************************************/

class DRamAllocator;
class TPinArgs;
class Defrag;

/**
Interface to RAM allocation and MMU data structure manipulation.
*/
class Mmu
	{
public:
	enum TPanic
		{
		EInvalidRamBankAtBoot,
		EInvalidReservedBankAtBoot,
		EInvalidPageTableAtBoot,
		EInvalidPdeAtBoot,
		EBadMappedPageAfterBoot,
		ERamAllocMutexCreateFailed,
		EBadFreePhysicalRam,
		EUnsafePageInfoAccess,
		EUnsafePageTableInfoAccess,
		EPhysMemSyncMutexCreateFailed,
		EDefragAllocFailed
		};

	/**
	Attribute flags used when allocating RAM pages.
	See #AllocRam etc.

	The least significant bits of these flags are used for the #TMemoryType
	value for the memory.
	*/
	enum TRamAllocFlags
		{
		// lower bits hold TMemoryType

		/**
		If this flag is set, don't wipe the contents of the memory when allocated.
		By default, for security and confidentiality reasons, the memory is filled
		with a 'wipe' value to erase the previous contents.
		*/
		EAllocNoWipe			= 1<<(KMemoryTypeShift),

		/**
		If this flag is set, any memory wiping will fill memory with the byte
		value starting at bit position #EAllocWipeByteShift in these flags.
		*/
		EAllocUseCustomWipeByte	= 1<<(KMemoryTypeShift+1),

		/**
		If this flag is set, memory allocation won't attempt to reclaim pages
		from the demand paging system.
		This is used to prevent deadlock when the paging system itself attempts
		to allocate memory for itself.
		*/
		EAllocNoPagerReclaim	= 1<<(KMemoryTypeShift+2),

		/**
		@internal
		*/
		EAllocFlagLast,

		/*
		Bit position within these flags, for the least significant bit of the
		byte value used when #EAllocUseCustomWipeByte is set.
		*/
		EAllocWipeByteShift		= 8
		};
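
	/*
	A minimal sketch (assumed, not from the original source) of composing
	TRamAllocFlags so that allocated pages are wiped with the custom byte
	0xAA; EMemAttNormalCached is assumed here to be a valid TMemoryType
	value occupying the low KMemoryTypeShift bits.

	@code
		Mmu::TRamAllocFlags flags = (Mmu::TRamAllocFlags)
			(EMemAttNormalCached
			|Mmu::EAllocUseCustomWipeByte
			|(0xAAu<<Mmu::EAllocWipeByteShift));
	@endcode
	*/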

public:
	void Init1();
	void Init1Common();
	void Init2();
	void Init2Common();
	void Init2Final();
	void Init2FinalCommon();
	void Init3();

	void BTracePrime(TUint aCategory);

	static void Panic(TPanic aPanic);

	static TInt HandlePageFault(TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions, TAny* aExceptionInfo);

	TUint FreeRamInPages();
	TUint TotalPhysicalRamPages();

	TInt AllocRam(	TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags, TZonePageType aZonePageType,
					TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
	void MarkPageAllocated(TPhysAddr aPhysAddr, TZonePageType aZonePageType);
	void FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType);
	TInt AllocContiguousRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags);
	void FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount);

	const SRamZone* RamZoneConfig(TRamZoneCallback& aCallback) const;
	void SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback);
	TInt ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask);
	TInt GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData);
	TInt ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign);
	TInt ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList);
	TInt RamHalFunction(TInt aFunction, TAny* a1, TAny* a2);
	void ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldPageType, TZonePageType aNewPageType);
	TInt FreeRamZone(TUint aZoneId);

	TInt AllocPhysicalRam(TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags);
	void FreePhysicalRam(TPhysAddr* aPages, TUint aCount);
	TInt AllocPhysicalRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags);
	void FreePhysicalRam(TPhysAddr aPhysAddr, TUint aCount);
	TInt ClaimPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags);
	void AllocatedPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags);
private:
	void SetAllocPhysRam(TPhysAddr aPhysAddr, TUint aCount);
	void SetAllocPhysRam(TPhysAddr* aPageList, TUint aNumPages);

public:
	TLinAddr MapTemp(TPhysAddr aPage, TUint aColour, TUint aSlot=0);
	void UnmapTemp(TUint aSlot=0);
	void RemoveAliasesForPageTable(TPhysAddr aPageTable);

	static TBool MapPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte);
	static TBool UnmapPages(TPte* const aPtePtr, TUint aCount);
	static TBool UnmapPages(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages);
	static void RemapPage(TPte* const aPtePtr, TPhysAddr& aPage, TPte aBlankPte);
	static void RestrictPagesNA(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages);
	static TBool PageInPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte);

	// implemented in CPU-specific code...
	static TUint PteType(TMappingPermissions aPermissions, TBool aGlobal);
	static TUint PdeType(TMemoryAttributes aAttributes);
	static TPte BlankPte(TMemoryAttributes aAttributes, TUint aPteType);
	static TPde BlankPde(TMemoryAttributes aAttributes);
	static TPde BlankSectionPde(TMemoryAttributes aAttributes, TUint aPteType);
	static TBool CheckPteTypePermissions(TUint aPteType, TUint aAccessPermissions);
	static TMappingPermissions PermissionsFromPteType(TUint aPteType);
	void PagesAllocated(TPhysAddr* aPageList, TUint aCount, TRamAllocFlags aFlags, TBool aReallocate=false);
	void PageFreed(SPageInfo* aPageInfo);
	void CleanAndInvalidatePages(TPhysAddr* aPages, TUint aCount, TMemoryAttributes aAttributes, TUint aColour);
public:
	// utils, implemented in CPU-specific code...
	static TPde* PageDirectory(TInt aOsAsid);
	static TPde* PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress);
	static TPhysAddr PdePhysAddr(TPde aPde);
	static TPhysAddr PtePhysAddr(TPte aPte, TUint aPteIndex);
	static TPte* PageTableFromPde(TPde aPde);
	static TPte* SafePageTableFromPde(TPde aPde);
	static TPhysAddr SectionBaseFromPde(TPde aPde);
	static TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid);
	static TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid);
	static TPhysAddr PageTablePhysAddr(TPte* aPt);
	static TPhysAddr LinearToPhysical(TLinAddr aAddr, TInt aOsAsid=KKernelOsAsid);
	static TPhysAddr UncheckedLinearToPhysical(TLinAddr aAddr, TInt aOsAsid);
	static TPte MakePteInaccessible(TPte aPte, TBool aReadOnly);
	static TPte MakePteAccessible(TPte aPte, TBool aWrite);
	static TBool IsPteReadOnly(TPte aPte);
	static TBool IsPteMoreAccessible(TPte aNewPte, TPte aOldPte);
	static TBool IsPteInaccessible(TPte aPte);
	static TBool PdeMapsPageTable(TPde aPde);
	static TBool PdeMapsSection(TPde aPde);

	void SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr);
	void SyncPhysicalMemoryBeforeDmaRead (TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr);
	void SyncPhysicalMemoryAfterDmaRead  (TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr);

	static TPte SectionToPageEntry(TPde& aPde);
	static TPde PageToSectionEntry(TPte aPte, TPde aPde);
	static TMemoryAttributes CanonicalMemoryAttributes(TMemoryAttributes aAttr);

public:
	/**
	Class representing the resources and methods required to create temporary
	mappings of physical memory pages in order to make them accessible to
	software.
	These are required by various memory model functions and are created only
	during system boot.
	*/
	class TTempMapping
		{
	public:
		void Alloc(TUint aNumPages);
		TLinAddr Map(TPhysAddr aPage, TUint aColour);
		TLinAddr Map(TPhysAddr aPage, TUint aColour, TPte aBlankPte);
		TLinAddr Map(TPhysAddr* aPages, TUint aCount, TUint aColour);
		void Unmap();
		void Unmap(TBool aIMBRequired);
		FORCE_INLINE TTempMapping()
			: iSize(0)
			{}
	public:
		TLinAddr iLinAddr;		///< Virtual address of the memory page mapped by #iPtePtr.
		TPte* iPtePtr;			///< Pointer to first PTE allocated to this object.
	private:
		TPte iBlankPte;			///< PTE value to use for mapping pages, with the physical address component equal to zero.
		TUint8 iSize;			///< Maximum number of pages which can be mapped in one go.
		TUint8 iCount;			///< Number of pages currently mapped.
		TUint8 iColour;			///< Colour of any pages mapped (acts as index from #iLinAddr and #iPtePtr).
		TUint8 iSpare1;
	private:
		static TLinAddr iNextLinAddr;
		};
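
	/*
	A minimal usage sketch (assumed, not from the original source): a
	TTempMapping has its virtual address range reserved once at boot and
	is then reused repeatedly.

	@code
		TTempMapping tempMap;
		tempMap.Alloc(1);	// boot-time: reserve one page of virtual address space
		TLinAddr va = tempMap.Map(aPhysAddr,0);	// aPhysAddr: assumed physical page; colour 0
		// ... access the page through va ...
		tempMap.Unmap();
	@endcode
	*/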
private:
	enum { KNumTempMappingSlots=2 };
	/**
	Temporary mappings used by various functions.
	Use of these is serialised by the #RamAllocLock.
	*/
	TTempMapping iTempMap[KNumTempMappingSlots];

	TTempMapping iPhysMemSyncTemp;	///< Temporary mapping used for physical memory sync.
	DMutex* iPhysMemSyncMutex;		///< Mutex used to serialise use of #iPhysMemSyncTemp.

public:
	TPte iTempPteCached;			///< PTE value for cached temporary mappings
	TPte iTempPteUncached;			///< PTE value for uncached temporary mappings
	TPte iTempPteCacheMaintenance;	///< PTE value for temporary mapping of cache maintenance
private:
	DRamAllocator* iRamPageAllocator;	///< The RAM allocator used for managing free RAM pages.
	const SRamZone* iRamZones;			///< A pointer to the RAM zone configuration from the variant.
	TRamZoneCallback iRamZoneCallback;	///< Pointer to the RAM zone callback function.
	Defrag* iDefrag;					///< The RAM defrag class implementation.

	/**
	A counter incremented every time Mmu::PagesAllocated invalidates the L1 cache.
	This is used as part of a cache maintenance optimisation.
	*/
	TInt iCacheInvalidateCounter;

	/**
	Number of free RAM pages which are cached at L1 and have
	SPageInfo::CacheInvalidateCounter()==#iCacheInvalidateCounter.
	This is used as part of a cache maintenance optimisation.
	*/
	TInt iCacheInvalidatePageCount;

public:
	/**
	Linked list of threads which have an active IPC alias, i.e. have called
	DMemModelThread::Alias. Threads are linked by their DMemModelThread::iAliasLink member.
	Updates to this list are protected by the #MmuLock.
	*/
	SDblQue iAliasList;

	/**
	The mutex used to protect RAM allocation.
	This is the mutex #RamAllocLock operates on.
	*/
	DMutex* iRamAllocatorMutex;

private:
	/**
	Number of nested calls to RamAllocLock::Lock.
	*/
	TUint iRamAllocLockCount;

	/**
	Set by various memory allocation routines to indicate that a memory allocation
	has failed. This is used by #RamAllocLock in its management of out-of-memory
	notifications.
	*/
	TBool iRamAllocFailed;

	/**
	Saved value for #FreeRamInPages which is used by #RamAllocLock in its management
	of memory level change notifications.
	*/
	TUint iRamAllocInitialFreePages;

	friend class RamAllocLock;

#ifdef FMM_VERIFY_RAM
private:
	void VerifyRam();
#endif
	};

/**
The single instance of class #Mmu.
*/
extern Mmu TheMmu;


#ifndef _DEBUG
/**
Perform a page table walk to return the physical address of
the memory mapped at virtual address \a aAddr in the
address space \a aOsAsid.

If the page table used was not one allocated by the kernel
then the results are unpredictable and may cause a system fault.

@pre #MmuLock held.
*/
FORCE_INLINE TPhysAddr Mmu::LinearToPhysical(TLinAddr aAddr, TInt aOsAsid)
	{
	return Mmu::UncheckedLinearToPhysical(aAddr,aOsAsid);
	}
#endif
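
/*
A minimal usage sketch (assumed, not from the original source): translating
a virtual address with the MmuLock held, as the precondition requires.

@code
	MmuLock::Lock();
	TPhysAddr pa = Mmu::LinearToPhysical(aAddr);	// aAddr: assumed kernel-mapped address
	MmuLock::Unlock();
@endcode
*/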

__ASSERT_COMPILE((Mmu::EAllocFlagLast>>Mmu::EAllocWipeByteShift)==0); // make sure flags don't run into wipe byte value


/**
Create a temporary mapping of a physical page.
The RamAllocatorMutex must be held before this function is called and not released
until after UnmapTemp has been called.

@param aPage	The physical address of the page to be mapped.
@param aColour	The 'colour' of the page if relevant.
@param aSlot	Slot number to use, must be less than Mmu::KNumTempMappingSlots.

@return The linear address of where the page has been mapped.
*/
FORCE_INLINE TLinAddr Mmu::MapTemp(TPhysAddr aPage, TUint aColour, TUint aSlot)
	{
//	Kern::Printf("Mmu::MapTemp(0x%08x,%d,%d)",aPage,aColour,aSlot);
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(aSlot<KNumTempMappingSlots);
	return iTempMap[aSlot].Map(aPage,aColour);
	}


/**
Remove the temporary mapping created with MapTemp.

@param aSlot	Slot number which was used when temp mapping was made.
*/
FORCE_INLINE void Mmu::UnmapTemp(TUint aSlot)
	{
//	Kern::Printf("Mmu::UnmapTemp(%d)",aSlot);
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(aSlot<KNumTempMappingSlots);
	iTempMap[aSlot].Unmap();
	}
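
/*
A minimal usage sketch (assumed, not from the original source): temporarily
mapping a physical page to zero-fill it; RamAllocLock must be held across
the MapTemp/UnmapTemp pair.

@code
	RamAllocLock::Lock();
	TLinAddr va = TheMmu.MapTemp(aPhysAddr,aColour);	// aPhysAddr, aColour: assumed inputs
	memclr((TAny*)va,KPageSize);	// memclr: assumed available kernel utility
	TheMmu.UnmapTemp();
	RamAllocLock::Unlock();
@endcode
*/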


/**
Class representing the resources and arguments needed for various
memory pinning operations.

The term 'replacement pages' in this documentation means excess
RAM pages which have been allocated to the demand paging pool so
that when demand paged memory is pinned and removed from the pool
the pool does not become too small.

Replacement pages are allocated with #AllocReplacementPages and their
number remembered in #iReplacementPages. When a memory pinning operation
removes pages from the paging pool it will reduce #iReplacementPages
accordingly. At the end of the pinning operation, #FreeReplacementPages
is used to free any unused replacement pages.
*/
class TPinArgs
	{
public:
	/**
	Boolean value set to true if the requester of the pinning operation
	will only read from the pinned memory, not write to it.
	This is used as an optimisation to avoid unnecessarily marking
	demand paged memory as dirty.
	*/
	TBool iReadOnly;

	/**
	Boolean value set to true if sufficient replacement pages already exist
	in the demand paging pool and #AllocReplacementPages does not need
	to actually allocate any.
	*/
	TBool iUseReserve;

	/**
	The number of replacement pages allocated to this object by #AllocReplacementPages.
	A value of #EUseReserveForPinReplacementPages indicates that #iUseReserve
	was true, and there is sufficient RAM already reserved for the operation
	being attempted.
	*/
	TUint iReplacementPages;

	/**
	The number of page tables which have been pinned during the course
	of an operation. This is the number of valid entries written to
	#iPinnedPageTables.
	*/
	TUint iNumPinnedPageTables;

	/**
	Pointer to the location to store the addresses of any page tables
	which have been pinned during the course of an operation. This is
	incremented as entries are added.

	The null-pointer indicates that page tables do not require pinning.
	*/
	TPte** iPinnedPageTables;

public:
	/**
	Construct an empty TPinArgs, one which owns no resources.
	*/
	inline TPinArgs()
		: iReadOnly(0), iUseReserve(0), iReplacementPages(0), iNumPinnedPageTables(0), iPinnedPageTables(0)
		{
		}

	/**
	Return true if this TPinArgs has at least \a aRequired number of
	replacement pages allocated.
	*/
	FORCE_INLINE TBool HaveSufficientPages(TUint aRequired)
		{
		return iReplacementPages>=aRequired; // Note, EUseReserveForPinReplacementPages will always return true.
		}

	/**
	Allocate replacement pages for this TPinArgs so that it has at least
	\a aNumPages.
	*/
	TInt AllocReplacementPages(TUint aNumPages);

	/**
	Free all replacement pages which this TPinArgs still owns.
	*/
	void FreeReplacementPages();

#ifdef _DEBUG
	~TPinArgs();
#endif

	/**
	Value used to indicate that replacement pages are to come
	from an already allocated reserve and don't need specially
	allocating.
	*/
	enum { EUseReserveForPinReplacementPages = 0xffffffffu };
	};


#ifdef _DEBUG
inline TPinArgs::~TPinArgs()
	{
	__NK_ASSERT_DEBUG(!iReplacementPages);
	}
#endif
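
/*
A minimal usage sketch (assumed, not from the original source): a TPinArgs
owns replacement pages for the duration of a pinning operation and must have
freed them all before destruction.

@code
	TPinArgs pinArgs;
	pinArgs.iReadOnly = ETrue;	// pinned memory will only be read
	TInt r = pinArgs.AllocReplacementPages(aNumPages);	// aNumPages: assumed input
	if(r==KErrNone)
		{
		// ... perform the pinning operation, which consumes replacement pages ...
		}
	pinArgs.FreeReplacementPages();	// release any pages left over
@endcode
*/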


/**
Enumeration used in various RestrictPages APIs to specify the type of restrictions to apply.
*/
enum TRestrictPagesType
	{
	/**
	Make all mappings of the page not accessible.
	Pinned mappings will veto this operation.
	*/
	ERestrictPagesNoAccess				= 1,

	/**
	Demand paged memory being made 'old'.
	Specific case of ERestrictPagesNoAccess.
	*/
	ERestrictPagesNoAccessForOldPage	= ERestrictPagesNoAccess|0x80000000,

	/**
	Flag used for page moving; pinned mappings always veto the moving operation.
	*/
	ERestrictPagesForMovingFlag			= 0x40000000,

	/**
	Movable memory being made no access whilst it is being copied.
	Special case of ERestrictPagesNoAccess where pinned mappings always veto
	this operation even if they are read-only mappings.
	*/
	ERestrictPagesNoAccessForMoving		= ERestrictPagesNoAccess|ERestrictPagesForMovingFlag,
	};

#include "xmmu.h"

#endif