// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

/**
 @file
 @internalComponent
*/

21 |
#ifndef MMAPPING_H
|
|
22 |
#define MMAPPING_H
|
|
23 |
|
|
24 |
#include "mrefcntobj.h"
|
|
25 |
#include "mmappinglist.h"
|
|
26 |
#include "mpagearray.h"
|
|
27 |
|
|
28 |
|
|
29 |
|
|
30 |
/**
|
|
31 |
Base class for memory mappings.
|
|
32 |
|
|
33 |
This provides the methods for linking a mapping to a memory object
|
|
34 |
as well as the interface for updating the MMU page tables associated
|
|
35 |
with a mapping when the memory state changes.
|
|
36 |
*/
|
|
37 |
class DMemoryMappingBase : public DReferenceCountedObject
|
|
38 |
{
|
|
39 |
private:
|
|
40 |
/**
|
|
41 |
Memory object to which this mapping is currently attached.
|
|
42 |
Updates to the are protected by the MmuLock.
|
|
43 |
*/
|
|
44 |
DMemoryObject* iMemory;
|
|
45 |
|
|
46 |
public:
|
|
47 |
/**
|
|
48 |
Link used to maintain list of mappings attached to a memory object.
|
|
49 |
*/
|
|
50 |
TMappingListLink iLink;
|
|
51 |
|
|
52 |
/**
|
|
53 |
Offset, in page units, within the memory object's memory for start of this mapping.
|
|
54 |
*/
|
|
55 |
TUint iStartIndex;
|
|
56 |
|
|
57 |
/**
|
|
58 |
Size of this mapping, in page units.
|
|
59 |
*/
|
|
60 |
TUint iSizeInPages;
|
|
61 |
|
|
62 |
private:
|
|
63 |
/**
|
|
64 |
Instance count which is incremented every time a mapping is attached to a memory object.
|
|
65 |
When code is manipulating mappings, the instance count is used to detect that a
|
|
66 |
mapping has been reused and that the operation it is performing is no long needed.
|
|
67 |
*/
|
|
68 |
TUint iMapInstanceCount;
|
|
69 |
|
|
70 |
public:
|
|
71 |
|
|
72 |
/**
|
|
73 |
Bit flags stored in #Flags giving various state and attributes of the mapping.
|
|
74 |
*/
|
|
75 |
enum TFlags
|
|
76 |
{
|
|
77 |
/**
|
|
78 |
Flag set during object construction to indicate that this mapping is of
|
|
79 |
class #DCoarseMapping.
|
|
80 |
*/
|
|
81 |
ECoarseMapping = 1<<0,
|
|
82 |
|
|
83 |
/**
|
|
84 |
Flag set during object construction to indicate that this mapping will pin
|
|
85 |
any memory pages it maps. This may not be used with coarse memory mappings.
|
|
86 |
*/
|
|
87 |
EPinned = 1<<1,
|
|
88 |
|
|
89 |
/**
|
|
90 |
Pages have already been reserved for pinning, so when this mapping is attached
|
|
91 |
to a memory object no additional pages need to be reserved. Pre-reserving pages
|
|
92 |
is used to prevent the possibility of failing to pin due to an out of memory
|
|
93 |
condition. It is essential that the users of these mappings ensure that there
|
|
94 |
are enough reserved pages in the paging pool to meet the maximum mapping size
|
|
95 |
used.
|
|
96 |
*/
|
|
97 |
EPinningPagesReserved = 1<<2,
|
|
98 |
|
|
99 |
/**
|
|
100 |
Pages have been successfully pinned by this mapping. This is set after demand
|
|
101 |
paged memory has been succeeded pinned and is used to indicate that the pages
|
|
102 |
need unpinning again when the mapping is later unmapped.
|
|
103 |
*/
|
|
104 |
EPagesPinned = 1<<3,
|
|
105 |
|
|
106 |
/**
|
|
107 |
Flag set during object construction to indicate that MMU page tables are to
|
|
108 |
be permanently allocated for use by this mapping. Normally, page tables are
|
|
109 |
allocated as needed to map memory which can result in out-of-memory errors
|
|
110 |
when mapping memory pages.
|
|
111 |
*/
|
|
112 |
EPermanentPageTables = 1<<4,
|
|
113 |
|
|
114 |
/**
|
|
115 |
Permanent page tables have been successfully been allocated for this mapping.
|
|
116 |
This flag is used to track allocation so they can be released when the mapping
|
|
117 |
is destroyed.
|
|
118 |
*/
|
|
119 |
EPageTablesAllocated = 1<<5,
|
|
120 |
|
|
121 |
/**
|
|
122 |
For pinned mappings (EPinned) this flag is set whenever the mapping prevents
|
|
123 |
any pages of memory from being fully decommitted from a memory object. When a
|
|
124 |
mapping is finally unmapped from the memory object this flag is checked, and,
|
|
125 |
if set, further cleanup of the decommitted pages triggered.
|
|
126 |
*/
|
|
127 |
EPageUnmapVetoed = 1<<6,
|
|
128 |
|
|
129 |
/**
|
|
130 |
Mapping is being, or has been, detached from a memory object.
|
|
131 |
When set, operations on the mapping should act as though the mapping is no
|
|
132 |
longer attached to a memory object. Specifically, no further pages of memory
|
|
133 |
should be mapped into this mapping.
|
|
134 |
|
|
135 |
This flag is only set when the MmuLock is held.
|
|
136 |
*/
|
|
137 |
EDetaching = 1<<7,
|
|
138 |
|
|
139 |
/**
|
|
140 |
This mapping is a physical pinning mapping. The pages it pins
|
|
141 |
cannot be paged out or moved.
|
|
142 |
|
|
143 |
This flag is set when DPhysicalPinMapping objects are created.
|
|
144 |
*/
|
|
145 |
EPhysicalPinningMapping = 1<<8,
|
|
146 |
|
|
147 |
/**
|
|
148 |
Flag set during object construction to indicate that this mapping is of
|
|
149 |
class #DLargeMapping.
|
|
150 |
|
|
151 |
Note that #DLargeMapping is derived from #DCoarseMapping, therefore presence of this flag
|
|
152 |
implies presence of #ECoarseMapping as well.
|
|
153 |
*/
|
|
154 |
ELargeMapping = 1<<9,
|
|
155 |
};
|
|
156 |
|
|
157 |
/**
|
|
158 |
Bitmask of values from enum #TPteType which will be used to calculate
|
|
159 |
the correct attributes for any page table entries this mapping uses.
|
|
160 |
*/
|
|
161 |
FORCE_INLINE TUint8& PteType()
|
|
162 |
{ return iLink.iSpare1; }
|
|
163 |
|
|
164 |
/**
|
|
165 |
Bitmask of values from enum #TFlags.
|
|
166 |
The flags 16 bits and are stored in iLink.iSpare2 and iLink.iSpare3.
|
|
167 |
*/
|
|
168 |
FORCE_INLINE TUint16& Flags()
|
|
169 |
{ return (TUint16&)iLink.iSpare2; }
|
|
170 |
|
|
171 |
public:
|
|
172 |
/**
|
|
173 |
Return the memory object to which this mapping is currently attached.
|
|
174 |
|
|
175 |
@pre MmuLock is held. (If aNoCheck==false)
|
|
176 |
*/
|
|
177 |
FORCE_INLINE DMemoryObject* Memory(TBool aNoCheck=false)
|
|
178 |
{
|
|
179 |
if(!aNoCheck)
|
|
180 |
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
|
|
181 |
return iMemory;
|
|
182 |
}
|
|
183 |
|
|
184 |
/**
|
|
185 |
Return true if the mapping is currently attached to a memory object.
|
|
186 |
*/
|
|
187 |
FORCE_INLINE TBool IsAttached()
|
|
188 |
{ return iLink.IsLinked(); }
|
|
189 |
|
|
190 |
/**
|
|
191 |
Return true if the mapping is being, or has been, detached from a memory object.
|
|
192 |
The mapping may or may not still be attached to a memory object, i.e. #IsAttached
|
|
193 |
is indeterminate.
|
|
194 |
*/
|
|
195 |
FORCE_INLINE TBool BeingDetached()
|
|
196 |
{ return Flags()&EDetaching; }
|
|
197 |
|
|
198 |
/**
|
|
199 |
Return the mapping instance count.
|
|
200 |
@see #iMapInstanceCount.
|
|
201 |
*/
|
|
202 |
FORCE_INLINE TUint MapInstanceCount()
|
|
203 |
{ return iMapInstanceCount; }
|
|
204 |
|
|
205 |
/**
|
|
206 |
Return true if this mapping provides read only access to memory.
|
|
207 |
*/
|
|
208 |
FORCE_INLINE TBool IsReadOnly()
|
|
209 |
{ return !(PteType()&EPteTypeWritable); }
|
|
210 |
|
|
211 |
#ifdef MMU_SUPPORTS_EXECUTE_NEVER
|
|
212 |
/**
|
|
213 |
Return true if this mapping provides access to memory which allows
|
|
214 |
code to be executed from it.
|
|
215 |
*/
|
|
216 |
FORCE_INLINE TBool IsExecutable()
|
|
217 |
{ return (PteType()&EPteTypeExecutable); }
|
|
218 |
#endif
|
|
219 |
|
|
220 |
/**
|
|
221 |
Return true if this is a coarse mapping, in other words it is an instance of #DCoarseMapping or
|
|
222 |
#DLargeMapping.
|
|
223 |
*/
|
|
224 |
FORCE_INLINE TBool IsCoarse()
|
|
225 |
{ return Flags()&ECoarseMapping; }
|
|
226 |
|
|
227 |
/**
|
|
228 |
Return true if this mapping is a large mapping, in other words an instance of #DLargeMapping.
|
|
229 |
|
|
230 |
Note that all large mappings are also coarse mappings.
|
|
231 |
*/
|
|
232 |
FORCE_INLINE TBool IsLarge()
|
|
233 |
{ return Flags()&ELargeMapping; }
|
|
234 |
|
|
235 |
/**
|
|
236 |
Return true if this mapping pins the memory it maps.
|
|
237 |
*/
|
|
238 |
FORCE_INLINE TBool IsPinned()
|
|
239 |
{ return Flags()&EPinned; }
|
|
240 |
|
|
241 |
/**
|
|
242 |
Return true if this mapping physically pins the memory it maps.
|
|
243 |
*/
|
|
244 |
FORCE_INLINE TBool IsPhysicalPinning()
|
|
245 |
{ return Flags()&EPhysicalPinningMapping; }
|
|
246 |
|
|
247 |
/**
|
|
248 |
Return the access permissions which this mapping uses to maps memory.
|
|
249 |
*/
|
|
250 |
FORCE_INLINE TMappingPermissions Permissions()
|
|
251 |
{ return Mmu::PermissionsFromPteType(PteType()); }
|
|
252 |
|
|
253 |
/**
|
|
254 |
Link this mapping to a memory object.
|
|
255 |
|
|
256 |
This is called by the memory object during processing of #Attach.
|
|
257 |
|
|
258 |
@param aMemory The memory object the mapping is being attached to.
|
|
259 |
@param aMappingList The list to add this mapping to.
|
|
260 |
|
|
261 |
@pre MmuLock is held.
|
|
262 |
@pre Mapping list lock is held.
|
|
263 |
*/
|
|
264 |
void LinkToMemory(DMemoryObject* aMemory, TMappingList& aMappingList);
|
|
265 |
|
|
266 |
/**
|
|
267 |
Unlink this mapping from the memory object it was previously linked to with
|
|
268 |
#LinkToMemory.
|
|
269 |
|
|
270 |
This is called by the memory object during processing of #Detach.
|
|
271 |
|
|
272 |
@param aMappingList The list that the mapping appears on.
|
|
273 |
*/
|
|
274 |
void UnlinkFromMemory(TMappingList& aMappingList);
|
|
275 |
|
|
276 |
protected:
|
|
277 |
/**
|
|
278 |
@param aType Initial value for #Flags.
|
|
279 |
*/
|
|
280 |
DMemoryMappingBase(TUint aType);
|
|
281 |
|
|
282 |
/**
|
|
283 |
Attach this mapping to a memory object so that it maps a specified region of its memory.
|
|
284 |
|
|
285 |
@param aMemory The memory object.
|
|
286 |
@param aIndex The page index of the first page of memory to be mapped by the mapping.
|
|
287 |
@param aCount The number of pages of memory to be mapped by the mapping.
|
|
288 |
|
|
289 |
@return KErrNone if successful, otherwise one of the system wide error codes.
|
|
290 |
*/
|
|
291 |
TInt Attach(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
|
|
292 |
|
|
293 |
/**
|
|
294 |
Remove this mapping from the memory object it was previously attached to by #Attach.
|
|
295 |
*/
|
|
296 |
void Detach();
|
|
297 |
|
|
298 |
public:
|
|
299 |
/**
|
|
300 |
Update the page table entries corresponding to this mapping to add entries for
|
|
301 |
a specified set of memory pages.
|
|
302 |
|
|
303 |
This method is called by DMemoryObject::MapPages to update each mapping attached
|
|
304 |
to a memory object whenever new pages of memory are added. However, it won't be
|
|
305 |
called for any mapping with the #EPinned attribute as such mappings are unchanging.
|
|
306 |
|
|
307 |
@param aPages An RPageArray::TIter which refers to a range of pages
|
|
308 |
in a memory object. This has been clipped to fit within
|
|
309 |
the range of pages mapped by this mapping.
|
|
310 |
Only array entries which have state RPageArray::ECommitted
|
|
311 |
should be mapped into the mapping's page tables.
|
|
312 |
|
|
313 |
@param aMapInstanceCount The instance of this mapping which is to be updated.
|
|
314 |
Whenever this no longer matches the current #MapInstanceCount
|
|
315 |
the function must not update any more of the mapping's
|
|
316 |
page table entries, (but must still return KErrNone).
|
|
317 |
|
|
318 |
@return KErrNone if successful, otherwise one of the system wide error codes.
|
|
319 |
*/
|
|
320 |
virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;
|
|
321 |
|
|
322 |
/**
|
|
323 |
Update the page table entries corresponding to this mapping to remove entries for
|
|
324 |
a specified set of memory pages.
|
|
325 |
|
|
326 |
This method is called by DMemoryObject::UnmapPages to update each mapping attached
|
|
327 |
to a memory object whenever pages of memory are removed.
|
|
328 |
|
|
329 |
@param aPages An RPageArray::TIter which refers to a range of pages
|
|
330 |
in a memory object. This has been clipped to fit within
|
|
331 |
the range of pages mapped by this mapping.
|
|
332 |
Only array entries which return true for
|
|
333 |
RPageArray::TargetStateIsDecommitted should be unmapped
|
|
334 |
from the mapping's page tables.
|
|
335 |
|
|
336 |
@param aMapInstanceCount The instance of this mapping which is to be updated.
|
|
337 |
Whenever this no longer matches the current #MapInstanceCount
|
|
338 |
the function must not update any more of the mapping's
|
|
339 |
page table entries.
|
|
340 |
*/
|
|
341 |
virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;
|
|
342 |
|
|
343 |
/**
|
|
344 |
Update the page table entry corresponding to this mapping to update an entry for a specified
|
|
345 |
page that has just been moved or shadowed.
|
|
346 |
|
|
347 |
@param aPages The page array entry of the page in a memory object.
|
|
348 |
Only array entries which have a target state of
|
|
349 |
RPageArray::ECommitted should be mapped into the
|
|
350 |
mapping's page tables.
|
|
351 |
|
|
352 |
@param aIndex The index of the page in the memory object.
|
|
353 |
|
|
354 |
@param aMapInstanceCount The instance of this mapping which is to be updated.
|
|
355 |
Whenever this no longer matches the current #MapInstanceCount
|
|
356 |
the function must not update any more of the mapping's
|
|
357 |
page table entries, (but must still return KErrNone).
|
|
358 |
|
|
359 |
@param aInvalidateTLB Set to ETrue when the TLB entries associated with this page
|
|
360 |
should be invalidated. This must be done when there is
|
|
361 |
already a valid pte for this page, i.e. if the page is still
|
|
362 |
mapped.
|
|
363 |
*/
|
|
364 |
virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)=0;
|
|
365 |
|
|
366 |
/**
|
|
367 |
Update the page table entries corresponding to this mapping to apply access restrictions
|
|
368 |
to a specified set of memory pages.
|
|
369 |
|
|
370 |
This method is called by DMemoryObject::RestrictPages to update each mapping attached
|
|
371 |
to a memory object whenever pages of memory are restricted.
|
|
372 |
|
|
373 |
@param aPages An RPageArray::TIter which refers to a range of pages
|
|
374 |
in a memory object. This has been clipped to fit within
|
|
375 |
the range of pages mapped by this mapping.
|
|
376 |
Only array entries which return true for
|
|
377 |
RPageArray::TargetStateIsDecommitted should be unmapped
|
|
378 |
from the mapping's page tables.
|
|
379 |
|
|
380 |
@param aMapInstanceCount The instance of this mapping which is to be updated.
|
|
381 |
Whenever this no longer matches the current #MapInstanceCount
|
|
382 |
the function must not update any more of the mapping's
|
|
383 |
page table entries.
|
|
384 |
*/
|
|
385 |
virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;
|
|
386 |
|
|
387 |
/**
|
|
388 |
Update the page table entries corresponding to this mapping to add entries for
|
|
389 |
a specified set of demand paged memory pages following a 'page in' or memory
|
|
390 |
pinning operation.
|
|
391 |
|
|
392 |
@param aPages An RPageArray::TIter which refers to a range of pages
|
|
393 |
in a memory object. This will be within the range of pages
|
|
394 |
mapped by this mapping.
|
|
395 |
Only array entries which have state RPageArray::ECommitted
|
|
396 |
should be mapped into the mapping's page tables.
|
|
397 |
|
|
398 |
@param aPinArgs The resources required to pin any page tables the mapping uses.
|
|
399 |
Page table must be pinned if \a aPinArgs.iPinnedPageTables is
|
|
400 |
not the null pointer, in which case this the virtual address
|
|
401 |
of the pinned must be stored in the array this points to.
|
|
402 |
\a aPinArgs.iReadOnly is true if write access permissions
|
|
403 |
are not needed.
|
|
404 |
|
|
405 |
@return KErrNone if successful, otherwise one of the system wide error codes.
|
|
406 |
*/
|
|
407 |
virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount) =0;
|
|
408 |
|
|
409 |
|
|
410 |
/**
|
|
411 |
Update the page table entry corresponding to this mapping to add an entry for
|
|
412 |
a specified page which is in the process of being moved.
|
|
413 |
|
|
414 |
@param aPageArrayPtr The page array entry for the page to be mapped which must be
|
|
415 |
within this mapping range of pages.
|
|
416 |
Only array entries which have a target state of
|
|
417 |
RPageArray::ECommitted should be mapped into the mapping's
|
|
418 |
page tables.
|
|
419 |
|
|
420 |
@param aIndex The index of the page.
|
|
421 |
|
|
422 |
@return ETrue if successful, EFalse otherwise.
|
|
423 |
*/
|
|
424 |
virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)=0;
|
|
425 |
|
|
426 |
|
|
427 |
/**
|
|
428 |
In debug builds, dump information about this mapping to the kernel trace port.
|
|
429 |
*/
|
|
430 |
virtual void Dump();
|
|
431 |
|
|
432 |
private:
|
|
433 |
/**
|
|
434 |
Update this mapping's MMU data structures to map all pages of memory
|
|
435 |
currently committed to the memory object (#iMemory) in the region covered
|
|
436 |
by this mapping.
|
|
437 |
|
|
438 |
This method is called by #Attach after the mapping has been linked
|
|
439 |
into the memory object.
|
|
440 |
|
|
441 |
@return KErrNone if successful, otherwise one of the system wide error codes.
|
|
442 |
*/
|
|
443 |
virtual TInt DoMap() =0;
|
|
444 |
|
|
445 |
/**
|
|
446 |
Update this mapping's MMU data structures to unmap all pages of memory.
|
|
447 |
|
|
448 |
This method is called by #Detach before the mapping has been unlinked
|
|
449 |
from the memory object but after the #EDetaching flag has been set.
|
|
450 |
*/
|
|
451 |
virtual void DoUnmap() =0;
|
|
452 |
|
|
453 |
protected:
|
|
454 |
/**
|
|
455 |
For pinned mapping, this virtual method is called by #Attach in order to pin
|
|
456 |
pages of memory if required. This is called after the mapping has been linked
|
|
457 |
into the memory object but before #DoMap.
|
|
458 |
|
|
459 |
The default implementation of this method simply calls DMemoryManager::Pin.
|
|
460 |
|
|
461 |
@param aPinArgs The resources to use for pinning. This has sufficient replacement
|
|
462 |
pages allocated to pin every page the mapping covers, and the
|
|
463 |
value of \a aPinArgs.iReadOnly has been set to correspond to the
|
|
464 |
mappings access permissions.
|
|
465 |
|
|
466 |
@return KErrNone if successful, otherwise one of the system wide error codes.
|
|
467 |
*/
|
|
468 |
virtual TInt DoPin(TPinArgs& aPinArgs);
|
|
469 |
|
|
470 |
/**
|
|
471 |
For pinned mapping, this virtual method is called by #Detach in order to unpin
|
|
472 |
pages of memory if required. This is called before the mapping has been unlinked
|
|
473 |
from the memory object but after #DoUnmap.
|
|
474 |
|
|
475 |
The default implementation of this method simply calls DMemoryManager::Unpin.
|
|
476 |
|
|
477 |
@param aPinArgs The resources used for pinning. The replacement pages allocated
|
|
478 |
to this will be increased for each page which was became completely
|
|
479 |
unpinned.
|
|
480 |
*/
|
|
481 |
virtual void DoUnpin(TPinArgs& aPinArgs);
|
|
482 |
};
|
|
483 |
|
|
484 |
|
|
485 |
|
|
486 |
/**
|
|
487 |
Base class for memory mappings which map memory contents into a address space.
|
|
488 |
|
|
489 |
This provides methods for allocating virtual memory and holds the attributes needed
|
|
490 |
for MMU page table entries.
|
|
491 |
*/
|
|
492 |
class DMemoryMapping : public DMemoryMappingBase
|
|
493 |
{
|
|
494 |
protected:
|
|
495 |
/**
|
|
496 |
The page directory entry (PDE) value for use when mapping this mapping's page tables.
|
|
497 |
This value has the physical address component being zero, so a page table's physical
|
|
498 |
address can be simply ORed in.
|
|
499 |
|
|
500 |
This could potentially be removed (see DMemoryMapping::PdeType()).
|
|
501 |
*/
|
|
502 |
TPde iBlankPde;
|
|
503 |
|
|
504 |
/**
|
|
505 |
The page table entry (PTE) value for use when mapping pages into this mapping.
|
|
506 |
This value has the physical address component being zero, so a page's physical
|
|
507 |
address can be simply ORed in.
|
|
508 |
*/
|
|
509 |
TPte iBlankPte;
|
|
510 |
|
|
511 |
/**
|
|
512 |
Start of the virtual address region allocated for use by this mapping
|
|
513 |
ORed with the OS ASID of the address space this lies in.
|
|
514 |
|
|
515 |
Note, the address at which memory is mapped (#iLinAddrAndOsAsid) may be different
|
|
516 |
to this allocated address due to page colouring restrictions.
|
|
517 |
|
|
518 |
@see iAllocatedSize
|
|
519 |
*/
|
|
520 |
TLinAddr iAllocatedLinAddrAndOsAsid;
|
|
521 |
|
|
522 |
/**
|
|
523 |
Size of virtual address region memory allocated for use by this mapping.
|
|
524 |
|
|
525 |
@see iAllocatedLinAddrAndOsAsid
|
|
526 |
*/
|
|
527 |
TUint iAllocatedSize;
|
|
528 |
|
|
529 |
private:
|
|
530 |
/**
|
|
531 |
Start of the virtual address region that this mapping is currently
|
|
532 |
mapping memory at, ORed with the OS ASID of the address space this lies in.
|
|
533 |
|
|
534 |
This value is set by #Map which is called from #Attach when the mapping
|
|
535 |
is attached to a memory object. The address used may be different to
|
|
536 |
#iAllocatedLinAddrAndOsAsid due to page colouring restrictions.
|
|
537 |
|
|
538 |
The size of the region mapped is #iSizeInPages.
|
|
539 |
|
|
540 |
Note, access to this value is through #Base() and #OsAsid().
|
|
541 |
*/
|
|
542 |
TLinAddr iLinAddrAndOsAsid;
|
|
543 |
|
|
544 |
public:
|
|
545 |
/**
|
|
546 |
Second phase constructor.
|
|
547 |
|
|
548 |
The main function of this is to allocate a virtual address region for the mapping
|
|
549 |
and to add it to an address space.
|
|
550 |
|
|
551 |
@param aAttributes The attributes of the memory which this mapping is intended to map.
|
|
552 |
This is only needed to setup #PdeType which is required for correct
|
|
553 |
virtual address allocation so in practice the only relevant attribute
|
|
554 |
is to set EMemoryAttributeUseECC if required, else use
|
|
555 |
EMemoryAttributeStandard.
|
|
556 |
|
|
557 |
@param aFlags A combination of the options from enum TMappingCreateFlags.
|
|
558 |
|
|
559 |
@param aOsAsid The OS ASID of the address space the mapping is to be added to.
|
|
560 |
|
|
561 |
@param aAddr The virtual address to use for the mapping, or zero if this is
|
|
562 |
to be allocated by this function.
|
|
563 |
|
|
564 |
@param aSize The maximum size of memory, in bytes, this mapping will be used to
|
|
565 |
map. This determines the size of the virtual address region the
|
|
566 |
mapping will use.
|
|
567 |
|
|
568 |
@param aColourOffset The byte offset within a memory object's memory which this mapping
|
|
569 |
is to start. This is used to adjust virtual memory allocation to
|
|
570 |
meet page colouring restrictions. If this value is not known leave
|
|
571 |
this argument unspecified; however, it must be specified if \a aAddr
|
|
572 |
is specified.
|
|
573 |
|
|
574 |
@return KErrNone if successful, otherwise one of the system wide error codes.
|
|
575 |
*/
|
|
576 |
TInt Construct(TMemoryAttributes aAttributes, TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset=~(TLinAddr)0);
|
|
577 |
|
|
578 |
/**
|
|
579 |
Add this mapping to a memory object so that it maps a specified region of its memory.
|
|
580 |
|
|
581 |
Most of the action of this method is performed by #Attach.
|
|
582 |
|
|
583 |
@param aMemory The memory object.
|
|
584 |
@param aIndex The page index of the first page of memory to be mapped by the mapping.
|
|
585 |
@param aCount The number of pages of memory to be mapped by the mapping.
|
|
586 |
@param aPermissions The memory access permissions to apply to the mapping.
|
|
587 |
|
|
588 |
@return KErrNone if successful, otherwise one of the system wide error codes.
|
|
589 |
*/
|
|
590 |
TInt Map(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);
|
|
591 |
|
|
592 |
/**
|
|
593 |
Remove this mapping from the memory object it was previously added to by #Map.
|
|
594 |
|
|
595 |
Most of the action of this method is performed by #Detach.
|
|
596 |
*/
|
|
597 |
void Unmap();
|
|
598 |
|
|
599 |
/**
|
|
600 |
Return the OS ASID for the address space that this mapping is currently mapping memory in.
|
|
601 |
*/
|
|
602 |
FORCE_INLINE TInt OsAsid()
|
|
603 |
{
|
|
604 |
__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
|
|
605 |
return iLinAddrAndOsAsid&KPageMask;
|
|
606 |
}
|
|
607 |
|
|
608 |
/**
|
|
609 |
Return starting virtual address that this mapping is currently mapping memory at.
|
|
610 |
The size of the region mapped is #iSizeInPages.
|
|
611 |
*/
|
|
612 |
FORCE_INLINE TLinAddr Base()
|
|
613 |
{
|
|
614 |
__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
|
|
615 |
return iLinAddrAndOsAsid&~KPageMask;
|
|
616 |
}
|
|
617 |
|
|
618 |
/**
|
|
619 |
Return #Base()|#OsAsid()
|
|
620 |
*/
|
|
621 |
FORCE_INLINE TLinAddr LinAddrAndOsAsid()
|
|
622 |
{
|
|
623 |
__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
|
|
624 |
return iLinAddrAndOsAsid;
|
|
625 |
}
|
|
626 |
|
|
627 |
/**
|
|
628 |
Return #iBlankPde.
|
|
629 |
*/
|
|
630 |
FORCE_INLINE TPde BlankPde()
|
|
631 |
{
|
|
632 |
return iBlankPde;
|
|
633 |
}
|
|
634 |
|
|
635 |
/**
|
|
636 |
Emit BTrace traces identifying this mappings virtual address usage.
|
|
637 |
*/
|
|
638 |
void BTraceCreate();
|
|
639 |
|
|
640 |
/**
|
|
641 |
In debug builds, dump information about this mapping to the kernel trace port.
|
|
642 |
*/
|
|
643 |
virtual void Dump();
|
|
644 |
|
|
645 |
/**
|
|
646 |
Function to return a page table pointer for the specified linear address and
|
|
647 |
index to this mapping.
|
|
648 |
|
|
649 |
This is called by #Epoc::MovePhysicalPage when moving page table or page table info pages.
|
|
650 |
|
|
651 |
@param aLinAddr The linear address to find the page table entry for.
|
|
652 |
@param aMemoryIndex The memory object index of the page to find the page
|
|
653 |
table entry for.
|
|
654 |
|
|
655 |
@return A pointer to the page table entry, if the page table entry couldn't
|
|
656 |
be found this will be NULL
|
|
657 |
*/
|
|
658 |
virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)=0;
|
|
659 |
|
|
660 |
protected:
|
|
661 |
/**
|
|
662 |
@param aType Initial value for #Flags.
|
|
663 |
*/
|
|
664 |
DMemoryMapping(TUint aType);
|
|
665 |
|
|
666 |
/**
|
|
667 |
This destructor removes the mapping from any address space it was added to and
|
|
668 |
frees any virtual addresses allocated to it.
|
|
669 |
*/
|
|
670 |
~DMemoryMapping();
|
|
671 |
|
|
672 |
/**
|
|
673 |
Allocatate virtual addresses for this mapping to use.
|
|
674 |
This is called from #Construct and the arguments to this function are the same.
|
|
675 |
|
|
676 |
On success, iAllocatedLinAddrAndOsAsid and iAllocatedSize will be initialised.
|
|
677 |
*/
|
|
678 |
virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);
|
|
679 |
|
|
680 |
/**
|
|
681 |
Free the virtual addresses allocated to this mapping with AllocateVirtualMemory.
|
|
682 |
*/
|
|
683 |
virtual void FreeVirtualMemory();
|
|
684 |
};
|
|
685 |
|
|
686 |
|
|
687 |
|
|
688 |
/**
|
|
689 |
A memory mapping to map a 'chunk' aligned region of a DCoarseMemory object into
|
|
690 |
an address space. A 'chunk' is the size of memory mapped by a whole MMU page table
|
|
691 |
and is #KChunkSize bytes.
|
|
692 |
|
|
693 |
These mappings make use of page tables owned by a DCoarseMemory and when
|
|
694 |
they are attached to a memory object they are linked into
|
|
695 |
DCoarseMemory::DPageTables::iMappings not DCoarseMemory::iMappings.
|
|
696 |
*/
|
|
697 |
class DCoarseMapping : public DMemoryMapping
|
|
698 |
{
|
|
699 |
public:
|
|
700 |
DCoarseMapping();
|
|
701 |
~DCoarseMapping();
|
|
702 |
|
|
703 |
protected:
|
|
704 |
DCoarseMapping(TUint aFlags);
|
|
705 |
|
|
706 |
protected:
|
|
707 |
// from DMemoryMappingBase...
|
|
708 |
virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
|
|
709 |
virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
|
|
710 |
virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
|
|
711 |
virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
|
|
712 |
virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
|
|
713 |
virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
|
|
714 |
virtual TInt DoMap();
|
|
715 |
virtual void DoUnmap();
|
|
716 |
|
|
717 |
// from DMemoryMapping...
|
|
718 |
virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);
|
|
719 |
};
|
|
720 |
|
|
721 |
|
|
722 |
|
|
723 |
/**
|
|
724 |
A memory mapping to map a page aligned region of a memory object into
|
|
725 |
an address space. The may be used with any memory object: DFineMemory or DCoarseMemory.
|
|
726 |
*/
|
|
727 |
class DFineMapping : public DMemoryMapping
|
|
728 |
{
|
|
729 |
public:
|
|
730 |
DFineMapping();
|
|
731 |
~DFineMapping();
|
|
732 |
|
|
733 |
private:
|
|
734 |
// from DMemoryMappingBase...
|
|
735 |
virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
|
|
736 |
virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
|
|
737 |
virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB);
|
|
738 |
virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount);
|
|
739 |
virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
|
|
740 |
virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
|
|
741 |
virtual TInt DoMap();
|
|
742 |
virtual void DoUnmap();
|
|
743 |
|
|
744 |
// from DMemoryMapping...
|
|
745 |
|
|
746 |
/**
|
|
747 |
Allocatate virtual addresses for this mapping to use.
|
|
748 |
|
|
749 |
In addition to performing the action of DMemoryMapping::AllocateVirtualMemory
|
|
750 |
this will also allocate all permanent page tables for the mapping if it has attribute
|
|
751 |
#EPermanentPageTables.
|
|
752 |
*/
|
|
753 |
virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);
|
|
754 |
|
|
755 |
/**
|
|
756 |
Free the virtual addresses and permanent page tables allocated to this mapping with
|
|
757 |
AllocateVirtualMemory.
|
|
758 |
*/
|
|
759 |
virtual void FreeVirtualMemory();
|
|
760 |
|
|
761 |
virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);
|
|
762 |
|
|
763 |
// new...
|
|
764 |
|
|
765 |
/**
|
|
766 |
Allocate all the page tables required for this mapping. This is called by
|
|
767 |
AllocateVirtualMemory if the #EPermanentPageTables attribute is set.
|
|
768 |
|
|
769 |
Each page table for the virtual address region used by the mapping is
|
|
770 |
allocated if not already present. The permanence count of any page table
|
|
771 |
(SPageTableInfo::iPermanenceCount) is then incremented so that it is not
|
|
772 |
freed even when it no longer maps any pages.
|
|
773 |
|
|
774 |
If successful, the #EPageTablesAllocated flag in #Flags will be set.
|
|
775 |
|
|
776 |
@return KErrNone if successful, otherwise one of the system wide error codes.
|
|
777 |
*/
|
|
778 |
TInt AllocatePermanentPageTables();
|
|
779 |
|
|
780 |
/**
|
|
781 |
Free all permanent page tables allocated to this mapping.
|
|
782 |
|
|
783 |
This reverses the action of #AllocatePermanentPageTables by decrementing
|
|
784 |
the permanence count for each page table and freeing it if is no longer in use.
|
|
785 |
*/
|
|
786 |
void FreePermanentPageTables();
|
|
787 |
|
|
788 |
/**
|
|
789 |
Free a range of permanent page tables.
|
|
790 |
|
|
791 |
This is an implementation factor for FreePermanentPageTables and
|
|
792 |
AllocatePermanentPageTables. It decrements the permanence count
|
|
793 |
for each page table and frees it if is no longer in use
|
|
794 |
|
|
795 |
@param aFirstPde The address of the page directory entry which refers to
|
|
796 |
the first page table to be freed.
|
|
797 |
@param aLastPde The address of the page directory entry which refers to
|
|
798 |
the last page table to be freed.
|
|
799 |
*/
|
|
800 |
void FreePermanentPageTables(TPde* aFirstPde, TPde* aLastPde);
|
|
801 |
|
|
802 |
#ifdef _DEBUG
|
|
803 |
/**
|
|
804 |
Validate the contents of the page table are valid.
|
|
805 |
|
|
806 |
@param aPt The page table to validate.
|
|
807 |
*/
|
|
808 |
void ValidatePageTable(TPte* aPt, TLinAddr aAddr);
|
|
809 |
#endif
|
|
810 |
|
|
811 |
/**
|
|
812 |
Get the page table being used to map a specified virtual address if it exists.
|
|
813 |
|
|
814 |
@param aAddr A virtual address in the region allocated to this mapping.
|
|
815 |
|
|
816 |
@return The virtual address of the page table mapping \a aAddr,
|
|
817 |
or the null pointer if one wasn't found.
|
|
818 |
*/
|
|
819 |
TPte* GetPageTable(TLinAddr aAddr);
|
|
820 |
|
|
821 |
/**
|
|
822 |
Get the page table being used to map a specified virtual address; allocating
|
|
823 |
a new one if it didn't previously exist.
|
|
824 |
|
|
825 |
@param aAddr A virtual address in the region allocated to this mapping.
|
|
826 |
|
|
827 |
@return The virtual address of the page table mapping \a aAddr,
|
|
828 |
or the null pointer if one wasn't found and couldn't be allocated.
|
|
829 |
*/
|
|
830 |
TPte* GetOrAllocatePageTable(TLinAddr aAddr);
|
|
831 |
|
|
832 |
/**
|
|
833 |
Get and pin the page table being used to map a specified virtual address;
|
|
834 |
allocating a new one if it didn't previously exist.
|
|
835 |
|
|
836 |
@param aAddr A virtual address in the region allocated to this mapping.
|
|
837 |
@param aPinArgs The resources required to pin the page table.
|
|
838 |
On success, the page table will have been appended to
|
|
839 |
\a aPinArgs.iPinnedPageTables.
|
|
840 |
|
|
841 |
@return The virtual address of the page table mapping \a aAddr,
|
|
842 |
or the null pointer if one wasn't found and couldn't be allocated.
|
|
843 |
*/
|
|
844 |
TPte* GetOrAllocatePageTable(TLinAddr aAddr, TPinArgs& aPinArgs);
|
|
845 |
|
|
846 |
/**
|
|
847 |
Allocate a single page table.
|
|
848 |
|
|
849 |
@param aAddr The virtual address the page table will be used to map.
|
|
850 |
@param aPdeAddress Address of the page directory entry which is to map
|
|
851 |
the newly allocated page table.
|
|
852 |
@param aPermanent True, if the page table's permanence count is to be incremented.
|
|
853 |
|
|
854 |
@return The virtual address of the page table if it was successfully allocated,
|
|
855 |
otherwise the null pointer.
|
|
856 |
*/
|
|
857 |
TPte* AllocatePageTable(TLinAddr aAddr, TPde* aPdeAddress, TBool aPermanent=false);
|
|
858 |
|
|
859 |
/**
|
|
860 |
Free a single page table if it is unused.
|
|
861 |
|
|
862 |
@param aPdeAddress Address of the page directory entry (PDE) which maps the page table.
|
|
863 |
If the page table is freed, this PDE will be set to an 'unallocated' value.
|
|
864 |
*/
|
|
865 |
void FreePageTable(TPde* aPdeAddress);
|
|
866 |
};
|
|
867 |
|
|
868 |
|
|
869 |
|
|
870 |
/**
|
|
871 |
A mapping which provides access to the physical address used by a memory object
|
|
872 |
without mapping these at any virtual address accessible to software.
|
|
873 |
|
|
874 |
These mappings are always of the 'pinned' type to prevent the obtained physical addresses
|
|
875 |
from becoming invalid.
|
|
876 |
*/
|
|
877 |
class DPhysicalPinMapping : public DMemoryMappingBase
|
|
878 |
{
|
|
879 |
public:
|
|
880 |
DPhysicalPinMapping();
|
|
881 |
|
|
882 |
/**
|
|
883 |
Attach this mapping to a memory object so that it pins a specified region of its memory.
|
|
884 |
|
|
885 |
Most of the action of this method is performed by #Attach.
|
|
886 |
|
|
887 |
@param aMemory The memory object.
|
|
888 |
@param aIndex The page index of the first page of memory to be pinned by the mapping.
|
|
889 |
@param aCount The number of pages of memory to be pinned by the mapping.
|
|
890 |
@param aPermissions The memory access permissions appropriate to the intended use
|
|
891 |
of the physical addresses. E.g. if the memory contents will be
|
|
892 |
changes, use EReadWrite. These permissions are used for error
|
|
893 |
checking, e.g. detecting attempted writes to read-only memory.
|
|
894 |
They are also used for optimising access to demand paged memory;
|
|
895 |
which is more efficient if only read-only access is required.
|
|
896 |
|
|
897 |
@return KErrNone if successful,
|
|
898 |
KErrNotFound if any part of the memory to be pinned was not present,
|
|
899 |
KErrNoMemory if there was insufficient memory,
|
|
900 |
otherwise one of the system wide error codes.
|
|
901 |
*/
|
|
902 |
TInt Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);
|
|
903 |
|
|
904 |
/**
|
|
905 |
Remove this mapping from the memory object it was previously added to by #Pin.
|
|
906 |
|
|
907 |
Most of the action of this method is performed by #Detach.
|
|
908 |
*/
|
|
909 |
virtual void Unpin();
|
|
910 |
|
|
911 |
/**
|
|
912 |
Get the physical address(es) for a region of pages in this mapping.
|
|
913 |
|
|
914 |
@param aIndex Page index, within the mapping, for start of the region.
|
|
915 |
@param aCount Number of pages in the region.
|
|
916 |
@param aPhysicalAddress On success, this value is set to one of two values.
|
|
917 |
If the specified region is physically contiguous,
|
|
918 |
the value is the physical address of the first page
|
|
919 |
in the region. If the region is discontiguous, the
|
|
920 |
value is set to KPhysAddrInvalid.
|
|
921 |
@param aPhysicalPageList If not zero, this points to an array of TPhysAddr
|
|
922 |
objects. On success, this array will be filled
|
|
923 |
with the addresses of the physical pages which
|
|
924 |
contain the specified region. If aPageList is
|
|
925 |
zero, then the function will fail with
|
|
926 |
KErrNotFound if the specified region is not
|
|
927 |
physically contiguous.
|
|
928 |
|
|
929 |
@return 0 if successful and the whole region is physically contiguous.
|
|
930 |
1 if successful but the region isn't physically contiguous.
|
|
931 |
KErrNotFound, if any page in the region is not present,
|
|
932 |
otherwise one of the system wide error codes.
|
|
933 |
|
|
934 |
@pre This mapping must have been attached to a memory object with #Pin.
|
|
935 |
*/
|
|
936 |
TInt PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList);
|
|
937 |
private:
|
|
938 |
// from DMemoryMappingBase...
|
|
939 |
virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
|
|
940 |
virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
|
|
941 |
virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
|
|
942 |
virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
|
|
943 |
virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount); ///< Does nothing
|
|
944 |
virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);///< Not implemented. Faults in debug builds.
|
|
945 |
virtual TInt DoMap(); ///< Does nothing
|
|
946 |
virtual void DoUnmap(); ///< Does nothing
|
|
947 |
};
|
|
948 |
|
|
949 |
|
|
950 |
|
|
951 |
/**
|
|
952 |
A mapping which pins memory in order to prevent demand paging related
|
|
953 |
page faults from occurring.
|
|
954 |
*/
|
|
955 |
class DVirtualPinMapping : public DPhysicalPinMapping
|
|
956 |
{
|
|
957 |
public:
|
|
958 |
DVirtualPinMapping();
|
|
959 |
~DVirtualPinMapping();
|
|
960 |
|
|
961 |
/**
|
|
962 |
Create a new DVirtualPinMapping object suitable for pinning a specified number of pages.
|
|
963 |
|
|
964 |
If no maximum is specified (\a aMaxCount==0) then this object may be used to pin
|
|
965 |
any number of pages, however this will require dynamic allocation of storage for
|
|
966 |
page table references.
|
|
967 |
|
|
968 |
@param aMaxCount The maximum number of pages which can be pinned, or zero for no maximum.
|
|
969 |
|
|
970 |
@return The newly created DVirtualPinMapping or the null pointer if there was
|
|
971 |
insufficient memory.
|
|
972 |
*/
|
|
973 |
static DVirtualPinMapping* New(TUint aMaxCount);
|
|
974 |
|
|
975 |
/**
|
|
976 |
Attach this mapping to a memory object so that it pins a specified region of its memory.
|
|
977 |
|
|
978 |
Additionally, pin the page tables in a specified mapping (\a aMapping) which
|
|
979 |
are being used to map these pages.
|
|
980 |
|
|
981 |
The result of this function is that access to the pinned memory through the virtual
|
|
982 |
addresses used by \a aMapping will not generate any demand paging related page faults.
|
|
983 |
|
|
984 |
@param aMemory The memory object.
|
|
985 |
@param aIndex The page index of the first page of memory to be pinned by the mapping.
|
|
986 |
@param aCount The number of pages of memory to be pinned by the mapping.
|
|
987 |
@param aPermissions The memory access permissions appropriate to the intended use
|
|
988 |
of the physical addresses. E.g. if the memory contents will be
|
|
989 |
changes, use EReadWrite. These permissions are used for error
|
|
990 |
checking, e.g. detecting attempted writes to read-only memory.
|
|
991 |
They are also used for optimising access to demand paged memory;
|
|
992 |
which is more efficient if only read-only access is required.
|
|
993 |
@param aMapping The mapping whose page tables are to be pinned. This must be
|
|
994 |
currently mapping the specified region of memory pages.
|
|
995 |
@param aMapInstanceCount The instance count of the mapping who's page tables are to be pinned.
|
|
996 |
|
|
997 |
@return KErrNone if successful,
|
|
998 |
KErrNotFound if any part of the memory to be pinned was not present,
|
|
999 |
KErrNoMemory if there was insufficient memory,
|
|
1000 |
otherwise one of the system wide error codes.
|
|
1001 |
*/
|
|
1002 |
TInt Pin( DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions,
|
|
1003 |
DMemoryMappingBase* aMapping, TUint aMapInstanceCount);
|
|
1004 |
|
|
1005 |
/**
|
|
1006 |
Remove this mapping from the memory object it was previously added to by #Pin.
|
|
1007 |
This will unpin any memory pages and pages tables that were pinned.
|
|
1008 |
*/
|
|
1009 |
void Unpin();
|
|
1010 |
|
|
1011 |
/**
|
|
1012 |
Return the maximum number of page tables which could be required to map
|
|
1013 |
\a aPageCount pages. This is used by various resource reserving calculations.
|
|
1014 |
*/
|
|
1015 |
static TUint MaxPageTables(TUint aPageCount);
|
|
1016 |
|
|
1017 |
/**
|
|
1018 |
In debug builds, dump information about this mapping to the kernel trace port.
|
|
1019 |
*/
|
|
1020 |
virtual void Dump();
|
|
1021 |
|
|
1022 |
private:
|
|
1023 |
// from DMemoryMappingBase...
|
|
1024 |
virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Does nothing.
|
|
1025 |
virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
|
|
1026 |
virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);///< Not implemented. Faults in debug builds.
|
|
1027 |
virtual TInt DoPin(TPinArgs& aPinArgs);
|
|
1028 |
virtual void DoUnpin(TPinArgs& aPinArgs);
|
|
1029 |
|
|
1030 |
private:
|
|
1031 |
/**
|
|
1032 |
Allocate memory to store pointers to all the page table which map
|
|
1033 |
\a aCount pages of memory. The pointer to the allocated memory
|
|
1034 |
is stored at iAllocatedPinnedPageTables.
|
|
1035 |
|
|
1036 |
If iSmallPinnedPageTablesArray is large enough, this function doesn't
|
|
1037 |
allocate any memory.
|
|
1038 |
|
|
1039 |
@return KErrNone if successful, otherwise KErrNoMemory.
|
|
1040 |
*/
|
|
1041 |
TInt AllocPageTableArray(TUint aCount);
|
|
1042 |
|
|
1043 |
/**
|
|
1044 |
Delete iAllocatedPinnedPageTables.
|
|
1045 |
*/
|
|
1046 |
void FreePageTableArray();
|
|
1047 |
|
|
1048 |
/**
|
|
1049 |
Return the address of the array storing pinned page tables.
|
|
1050 |
This is either iSmallPinnedPageTablesArray or iAllocatedPinnedPageTables.
|
|
1051 |
*/
|
|
1052 |
TPte** PageTableArray();
|
|
1053 |
|
|
1054 |
/**
|
|
1055 |
Unpin all the page tables which have been pinned by this mapping.
|
|
1056 |
|
|
1057 |
@param aPinArgs The resources used for pinning. The replacement pages allocated
|
|
1058 |
to this will be increased for each page which was became completely
|
|
1059 |
unpinned.
|
|
1060 |
*/
|
|
1061 |
void UnpinPageTables(TPinArgs& aPinArgs);
|
|
1062 |
private:
|
|
1063 |
/**
|
|
1064 |
Temporary store for the mapping passed to #Pin
|
|
1065 |
*/
|
|
1066 |
DMemoryMappingBase* iPinVirtualMapping;
|
|
1067 |
|
|
1068 |
/**
|
|
1069 |
Temporary store for the mapping instance count passed to #Pin
|
|
1070 |
*/
|
|
1071 |
TUint iPinVirtualMapInstanceCount;
|
|
1072 |
|
|
1073 |
/**
|
|
1074 |
The number of page tables which are currently being pinned by this mapping.
|
|
1075 |
This is the number of valid entries stored at PageTableArray.
|
|
1076 |
*/
|
|
1077 |
TUint iNumPinnedPageTables;
|
|
1078 |
|
|
1079 |
/**
|
|
1080 |
The maximum number of pages which can be pinned by this mapping.
|
|
1081 |
If this is zero, there is no maximum.
|
|
1082 |
*/
|
|
1083 |
TUint iMaxCount;
|
|
1084 |
|
|
1085 |
/**
|
|
1086 |
The memory allocated by this object for storing pointer to the page tables
|
|
1087 |
it has pinned.
|
|
1088 |
*/
|
|
1089 |
TPte** iAllocatedPinnedPageTables;
|
|
1090 |
|
|
1091 |
enum
|
|
1092 |
{
|
|
1093 |
KSmallPinnedPageTableCount = 2 ///< Number of entries in iSmallPinnedPageTablesArray
|
|
1094 |
};
|
|
1095 |
|
|
1096 |
/**
|
|
1097 |
A small array to use for storing pinned page tables.
|
|
1098 |
This is an optimisation used for the typical case of pinning a small number of pages
|
|
1099 |
to avoid dynamic allocation of memory.
|
|
1100 |
*/
|
|
1101 |
TPte* iSmallPinnedPageTablesArray[KSmallPinnedPageTableCount];
|
|
1102 |
};
|
|
1103 |
|
|
1104 |
#endif
|