author      Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
date        Thu, 19 Aug 2010 11:14:22 +0300
branch      RCL_3
changeset   42:a179b74831c9
parent      28:5b5d147c7838
permissions -rw-r--r--
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

/**
 @file
 @internalComponent
*/

#ifndef MMAPPING_H
#define MMAPPING_H

#include "mrefcntobj.h"
#include "mmappinglist.h"
#include "mpagearray.h"



/**
Base class for memory mappings.

This provides the methods for linking a mapping to a memory object
as well as the interface for updating the MMU page tables associated
with a mapping when the memory state changes.
*/
class DMemoryMappingBase : public DReferenceCountedObject
    {
private:
    /**
    Memory object to which this mapping is currently attached.
    Updates to this are protected by the MmuLock.
    */
    DMemoryObject* iMemory;

public:
    /**
    Link used to maintain the list of mappings attached to a memory object.
    */
    TMappingListLink iLink;

    /**
    Offset, in page units, within the memory object's memory for the start of this mapping.
    */
    TUint iStartIndex;

    /**
    Size of this mapping, in page units.
    */
    TUint iSizeInPages;

private:
    /**
    Instance count which is incremented every time a mapping is attached to a memory object.
    When code is manipulating mappings, the instance count is used to detect that a
    mapping has been reused and that the operation it is performing is no longer needed.
    */
    TUint iMapInstanceCount;

public:

    /**
    Bit flags stored in #Flags giving various state and attributes of the mapping.
    */
    enum TFlags
        {
        /**
        Flag set during object construction to indicate that this mapping is of
        class #DCoarseMapping.
        */
        ECoarseMapping = 1<<0,

        /**
        Flag set during object construction to indicate that this mapping will pin
        any memory pages it maps. This may not be used with coarse memory mappings.
        */
        EPinned = 1<<1,

        /**
        Pages have already been reserved for pinning, so when this mapping is attached
        to a memory object no additional pages need to be reserved. Pre-reserving pages
        is used to prevent the possibility of failing to pin due to an out of memory
        condition. It is essential that the users of these mappings ensure that there
        are enough reserved pages in the paging pool to meet the maximum mapping size
        used.
        */
        EPinningPagesReserved = 1<<2,

        /**
        Pages have been successfully pinned by this mapping. This is set after demand
        paged memory has been successfully pinned and is used to indicate that the pages
        need unpinning again when the mapping is later unmapped.
        */
        EPagesPinned = 1<<3,

        /**
        Flag set during object construction to indicate that MMU page tables are to
        be permanently allocated for use by this mapping. Normally, page tables are
        allocated as needed to map memory which can result in out-of-memory errors
        when mapping memory pages.
        */
        EPermanentPageTables = 1<<4,

        /**
        Permanent page tables have been successfully allocated for this mapping.
        This flag is used to track the allocation so that the page tables can be
        released when the mapping is destroyed.
        */
        EPageTablesAllocated = 1<<5,

        /**
        For pinned mappings (EPinned) this flag is set whenever the mapping prevents
        any pages of memory from being fully decommitted from a memory object. When a
        mapping is finally unmapped from the memory object this flag is checked, and,
        if set, further cleanup of the decommitted pages is triggered.
        */
        EPageUnmapVetoed = 1<<6,

        /**
        Mapping is being, or has been, detached from a memory object.
        When set, operations on the mapping should act as though the mapping is no
        longer attached to a memory object. Specifically, no further pages of memory
        should be mapped into this mapping.

        This flag is only set when the MmuLock is held.
        */
        EDetaching = 1<<7,

        /**
        This mapping is a physical pinning mapping. The pages it pins
        cannot be paged out or moved.

        This flag is set when DPhysicalPinMapping objects are created.
        */
        EPhysicalPinningMapping = 1<<8,

        /**
        Flag set during object construction to indicate that this mapping is of
        class #DLargeMapping.

        Note that #DLargeMapping is derived from #DCoarseMapping, therefore presence of this flag
        implies presence of #ECoarseMapping as well.
        */
        ELargeMapping = 1<<9,
        };

    /**
    Bitmask of values from enum #TPteType which will be used to calculate
    the correct attributes for any page table entries this mapping uses.
    */
    FORCE_INLINE TUint8& PteType()
        { return iLink.iSpare1; }

    /**
    Bitmask of values from enum #TFlags.
    The flags are 16 bits wide and are stored in iLink.iSpare2 and iLink.iSpare3.
    */
    FORCE_INLINE TUint16& Flags()
        { return (TUint16&)iLink.iSpare2; }

public:
    /**
    Return the memory object to which this mapping is currently attached.

    @pre MmuLock is held (if aNoCheck==false).
    */
    FORCE_INLINE DMemoryObject* Memory(TBool aNoCheck=false)
        {
        if(!aNoCheck)
            __NK_ASSERT_DEBUG(MmuLock::IsHeld());
        return iMemory;
        }

    /**
    Return true if the mapping is currently attached to a memory object.
    */
    FORCE_INLINE TBool IsAttached()
        { return iLink.IsLinked(); }

    /**
    Return true if the mapping is being, or has been, detached from a memory object.
    The mapping may or may not still be attached to a memory object, i.e. #IsAttached
    is indeterminate.
    */
    FORCE_INLINE TBool BeingDetached()
        { return Flags()&EDetaching; }

    /**
    Return the mapping instance count.
    @see #iMapInstanceCount.
    */
    FORCE_INLINE TUint MapInstanceCount()
        { return iMapInstanceCount; }

    /**
    Return true if this mapping provides read only access to memory.
    */
    FORCE_INLINE TBool IsReadOnly()
        { return !(PteType()&EPteTypeWritable); }

#ifdef MMU_SUPPORTS_EXECUTE_NEVER
    /**
    Return true if this mapping provides access to memory which allows
    code to be executed from it.
    */
    FORCE_INLINE TBool IsExecutable()
        { return (PteType()&EPteTypeExecutable); }
#endif

    /**
    Return true if this is a coarse mapping, in other words it is an instance of #DCoarseMapping or
    #DLargeMapping.
    */
    FORCE_INLINE TBool IsCoarse()
        { return Flags()&ECoarseMapping; }

    /**
    Return true if this mapping is a large mapping, in other words an instance of #DLargeMapping.

    Note that all large mappings are also coarse mappings.
    */
    FORCE_INLINE TBool IsLarge()
        { return Flags()&ELargeMapping; }

    /**
    Return true if this mapping pins the memory it maps.
    */
    FORCE_INLINE TBool IsPinned()
        { return Flags()&EPinned; }

    /**
    Return true if this mapping physically pins the memory it maps.
    */
    FORCE_INLINE TBool IsPhysicalPinning()
        { return Flags()&EPhysicalPinningMapping; }

    /**
    Return true if this mapping has been successfully attached to a memory object, pinning its pages.
    */
    FORCE_INLINE TBool PagesPinned()
        { return Flags()&EPagesPinned; }

    /**
    Return the access permissions which this mapping uses to map memory.
    */
    FORCE_INLINE TMappingPermissions Permissions()
        { return Mmu::PermissionsFromPteType(PteType()); }

    /**
    Link this mapping to a memory object.

    This is called by the memory object during processing of #Attach.

    @param aMemory      The memory object the mapping is being attached to.
    @param aMappingList The list to add this mapping to.

    @pre MmuLock is held.
    @pre Mapping list lock is held.
    */
    void LinkToMemory(DMemoryObject* aMemory, TMappingList& aMappingList);

    /**
    Unlink this mapping from the memory object it was previously linked to with
    #LinkToMemory.

    This is called by the memory object during processing of #Detach.

    @param aMappingList The list that the mapping appears on.
    */
    void UnlinkFromMemory(TMappingList& aMappingList);

    /**
    Get the physical address(es) for a region of pages in this mapping.

    @param aIndex            Page index, within the mapping, for the start of the region.
    @param aCount            Number of pages in the region.
    @param aPhysicalAddress  On success, this value is set to one of two values.
                             If the specified region is physically contiguous,
                             the value is the physical address of the first page
                             in the region. If the region is discontiguous, the
                             value is set to KPhysAddrInvalid.
    @param aPhysicalPageList If not zero, this points to an array of TPhysAddr
                             objects. On success, this array will be filled
                             with the addresses of the physical pages which
                             contain the specified region. If aPhysicalPageList is
                             zero, then the function will fail with
                             KErrNotFound if the specified region is not
                             physically contiguous.

    @return 0 if successful and the whole region is physically contiguous.
            1 if successful but the region isn't physically contiguous.
            KErrNotFound, if any page in the region is not present,
            otherwise one of the system wide error codes.

    @pre This mapping must have been attached to a memory object with #Pin.
    */
    TInt PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList);

protected:
    /**
    @param aType Initial value for #Flags.
    */
    DMemoryMappingBase(TUint aType);

    /**
    Attach this mapping to a memory object so that it maps a specified region of its memory.

    @param aMemory The memory object.
    @param aIndex  The page index of the first page of memory to be mapped by the mapping.
    @param aCount  The number of pages of memory to be mapped by the mapping.

    @return KErrNone if successful, otherwise one of the system wide error codes.
    */
    TInt Attach(DMemoryObject* aMemory, TUint aIndex, TUint aCount);

    /**
    Remove this mapping from the memory object it was previously attached to by #Attach.
    */
    void Detach();

public:
    /**
    Update the page table entries corresponding to this mapping to add entries for
    a specified set of memory pages.

    This method is called by DMemoryObject::MapPages to update each mapping attached
    to a memory object whenever new pages of memory are added. However, it won't be
    called for any mapping with the #EPinned attribute as such mappings are unchanging.

    @param aPages An RPageArray::TIter which refers to a range of pages
                  in a memory object. This has been clipped to fit within
                  the range of pages mapped by this mapping.
                  Only array entries which have state RPageArray::ECommitted
                  should be mapped into the mapping's page tables.

    @param aMapInstanceCount The instance of this mapping which is to be updated.
                  Whenever this no longer matches the current #MapInstanceCount
                  the function must not update any more of the mapping's
                  page table entries (but must still return KErrNone).

    @return KErrNone if successful, otherwise one of the system wide error codes.
    */
    virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;

    /**
    Update the page table entries corresponding to this mapping to remove entries for
    a specified set of memory pages.

    This method is called by DMemoryObject::UnmapPages to update each mapping attached
    to a memory object whenever pages of memory are removed.

    @param aPages An RPageArray::TIter which refers to a range of pages
                  in a memory object. This has been clipped to fit within
                  the range of pages mapped by this mapping.
                  Only array entries which return true for
                  RPageArray::TargetStateIsDecommitted should be unmapped
                  from the mapping's page tables.

    @param aMapInstanceCount The instance of this mapping which is to be updated.
                  Whenever this no longer matches the current #MapInstanceCount
                  the function must not update any more of the mapping's
                  page table entries.
    */
    virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;

    /**
    Update the page table entry corresponding to this mapping to update an entry for a specified
    page that has just been moved or shadowed.

    @param aPageArray The page array entry of the page in a memory object.
                  Only array entries which have a target state of
                  RPageArray::ECommitted should be mapped into the
                  mapping's page tables.

    @param aIndex The index of the page in the memory object.

    @param aMapInstanceCount The instance of this mapping which is to be updated.
                  Whenever this no longer matches the current #MapInstanceCount
                  the function must not update any more of the mapping's
                  page table entries.

    @param aInvalidateTLB Set to ETrue when the TLB entries associated with this page
                  should be invalidated. This must be done when there is
                  already a valid pte for this page, i.e. if the page is still
                  mapped.
    */
    virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)=0;

    /**
    Update the page table entries corresponding to this mapping to apply access restrictions
    to a specified set of memory pages.

    This method is called by DMemoryObject::RestrictPages to update each mapping attached
    to a memory object whenever pages of memory are restricted.

    @param aPages An RPageArray::TIter which refers to a range of pages
                  in a memory object. This has been clipped to fit within
                  the range of pages mapped by this mapping.
                  Only array entries which return true for
                  RPageArray::TargetStateIsDecommitted should be unmapped
                  from the mapping's page tables.

    @param aMapInstanceCount The instance of this mapping which is to be updated.
                  Whenever this no longer matches the current #MapInstanceCount
                  the function must not update any more of the mapping's
                  page table entries.
    */
    virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;

    /**
    Update the page table entries corresponding to this mapping to add entries for
    a specified set of demand paged memory pages following a 'page in' or memory
    pinning operation.

    @param aPages An RPageArray::TIter which refers to a range of pages
                  in a memory object. This will be within the range of pages
                  mapped by this mapping.
                  Only array entries which have state RPageArray::ECommitted
                  should be mapped into the mapping's page tables.

    @param aPinArgs The resources required to pin any page tables the mapping uses.
                  Page tables must be pinned if \a aPinArgs.iPinnedPageTables is
                  not the null pointer, in which case the virtual address of each
                  pinned page table must be stored in the array this points to.
                  \a aPinArgs.iReadOnly is true if write access permissions
                  are not needed.

    @return KErrNone if successful, otherwise one of the system wide error codes.
    */
    virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount) =0;


    /**
    Update the page table entry corresponding to this mapping to add an entry for
    a specified page which is in the process of being moved.

    @param aPageArrayPtr The page array entry for the page to be mapped, which must be
                  within this mapping's range of pages.
                  Only array entries which have a target state of
                  RPageArray::ECommitted should be mapped into the mapping's
                  page tables.

    @param aIndex The index of the page.

    @return ETrue if successful, EFalse otherwise.
    */
    virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)=0;


    /**
    In debug builds, dump information about this mapping to the kernel trace port.
    */
    virtual void Dump();

private:
    /**
    Update this mapping's MMU data structures to map all pages of memory
    currently committed to the memory object (#iMemory) in the region covered
    by this mapping.

    This method is called by #Attach after the mapping has been linked
    into the memory object.

    @return KErrNone if successful, otherwise one of the system wide error codes.
    */
    virtual TInt DoMap() =0;

    /**
    Update this mapping's MMU data structures to unmap all pages of memory.

    This method is called by #Detach before the mapping has been unlinked
    from the memory object but after the #EDetaching flag has been set.
    */
    virtual void DoUnmap() =0;

protected:
    /**
    For pinned mappings, this virtual method is called by #Attach in order to pin
    pages of memory if required. This is called after the mapping has been linked
    into the memory object but before #DoMap.

    The default implementation of this method simply calls DMemoryManager::Pin.

    @param aPinArgs The resources to use for pinning. This has sufficient replacement
                  pages allocated to pin every page the mapping covers, and the
                  value of \a aPinArgs.iReadOnly has been set to correspond to the
                  mapping's access permissions.

    @return KErrNone if successful, otherwise one of the system wide error codes.
    */
    virtual TInt DoPin(TPinArgs& aPinArgs);

    /**
    For pinned mappings, this virtual method is called by #Detach in order to unpin
    pages of memory if required. This is called before the mapping has been unlinked
    from the memory object but after #DoUnmap.

    The default implementation of this method simply calls DMemoryManager::Unpin.

    @param aPinArgs The resources used for pinning. The replacement pages allocated
                  to this will be increased for each page which became completely
                  unpinned.
    */
    virtual void DoUnpin(TPinArgs& aPinArgs);
    };
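
// Illustrative sketch, not part of the original header: one way a caller could
// combine the inline state accessors above to decide whether an operation it
// prepared earlier is still relevant, in the spirit of the iMapInstanceCount
// documentation. The helper name is hypothetical, and real callers would be
// expected to hold the MmuLock while reading this state.
static inline TBool MappingOperationStillCurrent(DMemoryMappingBase& aMapping, TUint aSampledInstanceCount)
    {
    // The operation is stale if the mapping was detached, is being detached,
    // or has been reused (its instance count changed) since the count was sampled.
    return aMapping.IsAttached()
        && !aMapping.BeingDetached()
        && aMapping.MapInstanceCount()==aSampledInstanceCount;
    }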



/**
Base class for memory mappings which map memory contents into an address space.

This provides methods for allocating virtual memory and holds the attributes needed
for MMU page table entries.
*/
class DMemoryMapping : public DMemoryMappingBase
    {
protected:
    /**
    The page directory entry (PDE) value for use when mapping this mapping's page tables.
    This value has a zero physical address component, so a page table's physical
    address can simply be ORed in.

    This could potentially be removed (see DMemoryMapping::PdeType()).
    */
    TPde iBlankPde;

    /**
    The page table entry (PTE) value for use when mapping pages into this mapping.
    This value has a zero physical address component, so a page's physical
    address can simply be ORed in.
    */
    TPte iBlankPte;

    /**
    Start of the virtual address region allocated for use by this mapping
    ORed with the OS ASID of the address space this lies in.

    Note, the address at which memory is mapped (#iLinAddrAndOsAsid) may be different
    to this allocated address due to page colouring restrictions.

    @see iAllocatedSize
    */
    TLinAddr iAllocatedLinAddrAndOsAsid;

    /**
    Size of the virtual address region allocated for use by this mapping.

    @see iAllocatedLinAddrAndOsAsid
    */
    TUint iAllocatedSize;

private:
    /**
    Start of the virtual address region that this mapping is currently
    mapping memory at, ORed with the OS ASID of the address space this lies in.

    This value is set by #Map which is called from #Attach when the mapping
    is attached to a memory object. The address used may be different to
    #iAllocatedLinAddrAndOsAsid due to page colouring restrictions.

    The size of the region mapped is #iSizeInPages.

    Note, access to this value is through #Base() and #OsAsid().
    */
    TLinAddr iLinAddrAndOsAsid;

public:
    /**
    Second phase constructor.

    The main function of this is to allocate a virtual address region for the mapping
    and to add it to an address space.

    @param aAttributes The attributes of the memory which this mapping is intended to map.
                  This is only needed to set up #PdeType which is required for correct
                  virtual address allocation, so in practice the only relevant attribute
                  is to set EMemoryAttributeUseECC if required, else use
                  EMemoryAttributeStandard.

    @param aFlags A combination of the options from enum TMappingCreateFlags.

    @param aOsAsid The OS ASID of the address space the mapping is to be added to.

    @param aAddr The virtual address to use for the mapping, or zero if this is
                  to be allocated by this function.

    @param aSize The maximum size of memory, in bytes, this mapping will be used to
                  map. This determines the size of the virtual address region the
                  mapping will use.

    @param aColourOffset The byte offset within a memory object's memory at which this
                  mapping is to start. This is used to adjust virtual memory allocation to
                  meet page colouring restrictions. If this value is not known leave
                  this argument unspecified; however, it must be specified if \a aAddr
                  is specified.

    @return KErrNone if successful, otherwise one of the system wide error codes.
    */
    TInt Construct(TMemoryAttributes aAttributes, TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset=~(TLinAddr)0);

    /**
    Add this mapping to a memory object so that it maps a specified region of its memory.

    Most of the action of this method is performed by #Attach.

    @param aMemory      The memory object.
    @param aIndex       The page index of the first page of memory to be mapped by the mapping.
    @param aCount       The number of pages of memory to be mapped by the mapping.
    @param aPermissions The memory access permissions to apply to the mapping.

    @return KErrNone if successful, otherwise one of the system wide error codes.
    */
    TInt Map(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);

    /**
    Remove this mapping from the memory object it was previously added to by #Map.

    Most of the action of this method is performed by #Detach.
    */
    void Unmap();

    /**
    Return the OS ASID for the address space that this mapping is currently mapping memory in.
    */
    FORCE_INLINE TInt OsAsid()
        {
        __NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
        return iLinAddrAndOsAsid&KPageMask;
        }

    /**
    Return the starting virtual address that this mapping is currently mapping memory at.
    The size of the region mapped is #iSizeInPages.
    */
    FORCE_INLINE TLinAddr Base()
        {
        __NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
        return iLinAddrAndOsAsid&~KPageMask;
        }

    /**
    Return #Base()|#OsAsid().
    */
    FORCE_INLINE TLinAddr LinAddrAndOsAsid()
        {
        __NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
        return iLinAddrAndOsAsid;
        }

    FORCE_INLINE TBool IsUserMapping()
        {
        // Note: must be usable before the mapping has been added to an address space
        return (PteType() & (EPteTypeUserAccess|EPteTypeGlobal)) == EPteTypeUserAccess;
        }

    /**
    Return #iBlankPde.
    */
    FORCE_INLINE TPde BlankPde()
        {
        return iBlankPde;
        }

    /**
    Emit BTrace traces identifying this mapping's virtual address usage.
    */
    void BTraceCreate();

    /**
    In debug builds, dump information about this mapping to the kernel trace port.
    */
    virtual void Dump();

    /**
    Function to return a page table pointer for the specified linear address and
    index into this mapping.

    This is called by #Epoc::MovePhysicalPage when moving page table or page table info pages.

    @param aLinAddr     The linear address to find the page table entry for.
    @param aMemoryIndex The memory object index of the page to find the page
                  table entry for.

    @return A pointer to the page table entry; if the page table entry couldn't
            be found this will be NULL.
    */
    virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)=0;

protected:
    /**
    @param aType Initial value for #Flags.
    */
    DMemoryMapping(TUint aType);

    /**
    This destructor removes the mapping from any address space it was added to and
    frees any virtual addresses allocated to it.
    */
    ~DMemoryMapping();

    /**
    Free any resources owned by this mapping, i.e. allow Construct() to be used
    on this mapping at a new address etc.
    */
    void Destruct();

    /**
    Allocate virtual addresses for this mapping to use.
    This is called from #Construct and the arguments to this function are the same.

    On success, iAllocatedLinAddrAndOsAsid and iAllocatedSize will be initialised.
    */
    virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);

    /**
    Free the virtual addresses allocated to this mapping with AllocateVirtualMemory.
    */
    virtual void FreeVirtualMemory();
    };
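
// Illustrative sketch, not part of the original header: the iBlankPde comment
// above notes that the blank PDE has a zero physical address component, so a
// page table's physical address can simply be ORed in. The helper below is a
// hypothetical illustration of that, assuming aPageTablePhys is suitably aligned.
static inline TPde ExampleMakePde(DMemoryMapping& aMapping, TPhysAddr aPageTablePhys)
    {
    // Combine the mapping's PDE attribute bits with the page table's physical address.
    return aMapping.BlankPde() | aPageTablePhys;
    }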



/**
A memory mapping to map a 'chunk' aligned region of a DCoarseMemory object into
an address space. A 'chunk' is the size of memory mapped by a whole MMU page table
and is #KChunkSize bytes.

These mappings make use of page tables owned by a DCoarseMemory and when
they are attached to a memory object they are linked into
DCoarseMemory::DPageTables::iMappings not DCoarseMemory::iMappings.
*/
class DCoarseMapping : public DMemoryMapping
    {
public:
    DCoarseMapping();
    ~DCoarseMapping();

protected:
    DCoarseMapping(TUint aFlags);

protected:
    // from DMemoryMappingBase...
    virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
    virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
    virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
    virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
    virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
    virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
    virtual TInt DoMap();
    virtual void DoUnmap();

    // from DMemoryMapping...
    virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);
    };



/**
A memory mapping to map a page aligned region of a memory object into
an address space. This may be used with any memory object: DFineMemory or DCoarseMemory.
*/
class DFineMapping : public DMemoryMapping
    {
public:
    DFineMapping();
    ~DFineMapping();

private:
    // from DMemoryMappingBase...
    virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
    virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
    virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB);
    virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount);
    virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
    virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
    virtual TInt DoMap();
    virtual void DoUnmap();

    // from DMemoryMapping...

    /**
    Allocate virtual addresses for this mapping to use.

    In addition to performing the action of DMemoryMapping::AllocateVirtualMemory
    this will also allocate all permanent page tables for the mapping if it has attribute
    #EPermanentPageTables.
    */
    virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);

    /**
    Free the virtual addresses and permanent page tables allocated to this mapping with
    AllocateVirtualMemory.
    */
    virtual void FreeVirtualMemory();

    virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);

    // new...

    /**
    Allocate all the page tables required for this mapping. This is called by
    AllocateVirtualMemory if the #EPermanentPageTables attribute is set.

    Each page table for the virtual address region used by the mapping is
    allocated if not already present. The permanence count of any page table
    (SPageTableInfo::iPermanenceCount) is then incremented so that it is not
    freed even when it no longer maps any pages.

    If successful, the #EPageTablesAllocated flag in #Flags will be set.

    @return KErrNone if successful, otherwise one of the system wide error codes.
    */
    TInt AllocatePermanentPageTables();

    /**
    Free all permanent page tables allocated to this mapping.

    This reverses the action of #AllocatePermanentPageTables by decrementing
    the permanence count for each page table and freeing it if it is no longer in use.
    */
    void FreePermanentPageTables();

    /**
    Free a range of permanent page tables.

    This is an implementation factor for FreePermanentPageTables and
    AllocatePermanentPageTables. It decrements the permanence count
    for each page table and frees it if it is no longer in use.

    @param aFirstPde The address of the page directory entry which refers to
                  the first page table to be freed.
    @param aLastPde  The address of the page directory entry which refers to
                  the last page table to be freed.
    */
    void FreePermanentPageTables(TPde* aFirstPde, TPde* aLastPde);

#ifdef _DEBUG
    /**
    Check that the contents of the page table are valid.

    @param aPt The page table to validate.
    */
    void ValidatePageTable(TPte* aPt, TLinAddr aAddr);
#endif

    /**
    Get the page table being used to map a specified virtual address if it exists.

    @param aAddr A virtual address in the region allocated to this mapping.

    @return The virtual address of the page table mapping \a aAddr,
            or the null pointer if one wasn't found.
    */
    TPte* GetPageTable(TLinAddr aAddr);

    /**
    Get the page table being used to map a specified virtual address, allocating
    a new one if it didn't previously exist.

    @param aAddr A virtual address in the region allocated to this mapping.

    @return The virtual address of the page table mapping \a aAddr,
            or the null pointer if one wasn't found and couldn't be allocated.
    */
    TPte* GetOrAllocatePageTable(TLinAddr aAddr);

    /**
    Get and pin the page table being used to map a specified virtual address,
    allocating a new one if it didn't previously exist.

    @param aAddr    A virtual address in the region allocated to this mapping.
    @param aPinArgs The resources required to pin the page table.
                  On success, the page table will have been appended to
                  \a aPinArgs.iPinnedPageTables.

    @return The virtual address of the page table mapping \a aAddr,
            or the null pointer if one wasn't found and couldn't be allocated.
    */
    TPte* GetOrAllocatePageTable(TLinAddr aAddr, TPinArgs& aPinArgs);

    /**
    Allocate a single page table.

    @param aAddr       The virtual address the page table will be used to map.
    @param aPdeAddress Address of the page directory entry which is to map
                  the newly allocated page table.
    @param aPermanent  True if the page table's permanence count is to be incremented.

    @return The virtual address of the page table if it was successfully allocated,
            otherwise the null pointer.
    */
    TPte* AllocatePageTable(TLinAddr aAddr, TPde* aPdeAddress, TBool aPermanent=false);

    /**
    Free a single page table if it is unused.

    @param aPdeAddress Address of the page directory entry (PDE) which maps the page table.
                  If the page table is freed, this PDE will be set to an 'unallocated' value.
    */
    void FreePageTable(TPde* aPdeAddress);
    };
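
// Illustrative sketch, not part of the original header: the construct/map/unmap
// life cycle of a fine mapping, using only methods declared in this file. The
// function name and error handling are hypothetical, the TMappingCreateFlags
// value is a placeholder, and EMemoryAttributeStandard and EReadWrite are the
// values named in the Construct() and Pin() documentation in this file.
static inline TInt ExampleMapMemoryObject(DFineMapping& aMapping, DMemoryObject* aMemory, TInt aOsAsid, TUint aPageCount)
    {
    // Reserve a virtual address region large enough for aPageCount pages...
    TInt r = aMapping.Construct(EMemoryAttributeStandard, (TMappingCreateFlags)0, aOsAsid, 0, aPageCount<<KPageShift);
    if(r!=KErrNone)
        return r;
    // ...then map the first aPageCount pages of the memory object into it.
    r = aMapping.Map(aMemory, 0, aPageCount, EReadWrite);
    if(r==KErrNone)
        {
        // The mapped region now starts at aMapping.Base() and covers aPageCount pages.
        aMapping.Unmap();
        }
    return r;
    }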


/**
A mapping which maps any memory into the kernel address space and provides access to
the physical address used by a memory object.

These mappings are always of the 'pinned' type to prevent the obtained physical addresses
from becoming invalid.
*/
class DKernelPinMapping : public DFineMapping
    {
public:
    DKernelPinMapping();
    TInt Construct(TUint aReserveSize);
    TInt MapAndPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);
    void UnmapAndUnpin();

public:
    TInt iReservePages; ///< The number of pages this mapping is able to map with its reserved resources (page tables etc.).
    };
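
// Illustrative sketch, not part of the original header: one plausible use of
// DKernelPinMapping for temporary kernel-side access to a memory object's pages.
// The function name and error handling are hypothetical; EReadWrite is the
// permission value named in the Pin() documentation below, and the mapping is
// assumed to have been constructed with a large enough reserve.
static inline TInt ExampleKernelAccess(DKernelPinMapping& aMapping, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    TInt r = aMapping.MapAndPin(aMemory, aIndex, aCount, EReadWrite);
    if(r!=KErrNone)
        return r;
    // Base() (inherited from DMemoryMapping) gives the kernel virtual address
    // at which the pinned pages are now accessible.
    volatile TUint8* p = (volatile TUint8*)aMapping.Base();
    (void)*p; // ...access the memory here...
    aMapping.UnmapAndUnpin();
    return KErrNone;
    }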


/**
A mapping which provides access to the physical addresses used by a memory object
without mapping them at any virtual address accessible to software.

These mappings are always of the 'pinned' type to prevent the obtained physical addresses
from becoming invalid.
*/
class DPhysicalPinMapping : public DMemoryMappingBase
    {
public:
    DPhysicalPinMapping();

    /**
    Attach this mapping to a memory object so that it pins a specified region of its memory.

    Most of the action of this method is performed by #Attach.

    @param aMemory      The memory object.
    @param aIndex       The page index of the first page of memory to be pinned by the mapping.
    @param aCount       The number of pages of memory to be pinned by the mapping.
    @param aPermissions The memory access permissions appropriate to the intended use
                  of the physical addresses. E.g. if the memory contents will be
                  changed, use EReadWrite. These permissions are used for error
                  checking, e.g. detecting attempted writes to read-only memory.
                  They are also used for optimising access to demand paged memory,
                  which is more efficient if only read-only access is required.

    @return KErrNone if successful,
            KErrNotFound if any part of the memory to be pinned was not present,
            KErrNoMemory if there was insufficient memory,
            otherwise one of the system wide error codes.
    */
    TInt Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);

    /**
    Remove this mapping from the memory object it was previously added to by #Pin.

    Most of the action of this method is performed by #Detach.
    */
    virtual void Unpin();

private:
    // from DMemoryMappingBase...
    virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
    virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
    virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
    virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
    virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount); ///< Does nothing
    virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex); ///< Not implemented. Faults in debug builds.
    virtual TInt DoMap(); ///< Does nothing
    virtual void DoUnmap(); ///< Does nothing
    };
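
// Illustrative sketch, not part of the original header: pinning a region of a
// memory object purely to discover its physical addresses, as described for
// Pin() and PhysAddr() above. The function name and error handling are
// hypothetical; aPageList is assumed to have room for aCount entries.
static inline TInt ExampleGetPhysicalPages(DPhysicalPinMapping& aPinning, DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPageList)
    {
    TInt r = aPinning.Pin(aMemory, aIndex, aCount, EReadWrite);
    if(r!=KErrNone)
        return r;
    TPhysAddr contiguousBase;
    r = aPinning.PhysAddr(0, aCount, contiguousBase, aPageList); // returns 0 or 1 on success, see PhysAddr()
    if(r<0)
        aPinning.Unpin(); // the physical addresses are of no use if the lookup failed
    return r;
    }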



/**
A mapping which pins memory in order to prevent demand paging related
page faults from occurring.
*/
class DVirtualPinMapping : public DPhysicalPinMapping
    {
public:
    DVirtualPinMapping();
    ~DVirtualPinMapping();

    /**
    Create a new DVirtualPinMapping object suitable for pinning a specified number of pages.

    If no maximum is specified (\a aMaxCount==0) then this object may be used to pin
    any number of pages, however this will require dynamic allocation of storage for
    page table references.

    @param aMaxCount The maximum number of pages which can be pinned, or zero for no maximum.

    @return The newly created DVirtualPinMapping or the null pointer if there was
            insufficient memory.
    */
    static DVirtualPinMapping* New(TUint aMaxCount);

    /**
    Attach this mapping to a memory object so that it pins a specified region of its memory.

    Additionally, pin the page tables in a specified mapping (\a aMapping) which
    are being used to map these pages.

    The result of this function is that access to the pinned memory through the virtual
    addresses used by \a aMapping will not generate any demand paging related page faults.

    @param aMemory      The memory object.
    @param aIndex       The page index of the first page of memory to be pinned by the mapping.
    @param aCount       The number of pages of memory to be pinned by the mapping.
    @param aPermissions The memory access permissions appropriate to the intended use
                  of the physical addresses. E.g. if the memory contents will be
                  changed, use EReadWrite. These permissions are used for error
                  checking, e.g. detecting attempted writes to read-only memory.
                  They are also used for optimising access to demand paged memory,
                  which is more efficient if only read-only access is required.
    @param aMapping     The mapping whose page tables are to be pinned. This must be
                  currently mapping the specified region of memory pages.
    @param aMapInstanceCount The instance count of the mapping whose page tables are to be pinned.

    @return KErrNone if successful,
            KErrNotFound if any part of the memory to be pinned was not present,
            KErrNoMemory if there was insufficient memory,
            otherwise one of the system wide error codes.
    */
    TInt Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions,
             DMemoryMappingBase* aMapping, TUint aMapInstanceCount);

    /**
    Remove this mapping from the memory object it was previously added to by #Pin.
    This will unpin any memory pages and page tables that were pinned.
    */
    void Unpin();

    /**
    Return the maximum number of page tables which could be required to map
    \a aPageCount pages. This is used by various resource reserving calculations.
    */
    static TUint MaxPageTables(TUint aPageCount);

    /**
    In debug builds, dump information about this mapping to the kernel trace port.
    */
    virtual void Dump();

private:
    // from DMemoryMappingBase...
    virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Does nothing.
    virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
    virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex); ///< Not implemented. Faults in debug builds.
    virtual TInt DoPin(TPinArgs& aPinArgs);
    virtual void DoUnpin(TPinArgs& aPinArgs);

private:
    /**
    Allocate memory to store pointers to all the page tables which map
    \a aCount pages of memory. The pointer to the allocated memory
    is stored at iAllocatedPinnedPageTables.

    If iSmallPinnedPageTablesArray is large enough, this function doesn't
    allocate any memory.

    @return KErrNone if successful, otherwise KErrNoMemory.
    */
    TInt AllocPageTableArray(TUint aCount);

    /**
    Delete iAllocatedPinnedPageTables.
    */
    void FreePageTableArray();

    /**
    Return the address of the array storing pinned page tables.
    This is either iSmallPinnedPageTablesArray or iAllocatedPinnedPageTables.
    */
    TPte** PageTableArray();

    /**
    Unpin all the page tables which have been pinned by this mapping.

    @param aPinArgs The resources used for pinning. The replacement pages allocated
                  to this will be increased for each page which became completely
                  unpinned.
    */
    void UnpinPageTables(TPinArgs& aPinArgs);
private:
    /**
    Temporary store for the mapping passed to #Pin.
    */
    DMemoryMappingBase* iPinVirtualMapping;

    /**
    Temporary store for the mapping instance count passed to #Pin.
    */
    TUint iPinVirtualMapInstanceCount;

    /**
    The number of page tables which are currently being pinned by this mapping.
    This is the number of valid entries stored at PageTableArray.
    */
    TUint iNumPinnedPageTables;

    /**
    The maximum number of pages which can be pinned by this mapping.
    If this is zero, there is no maximum.
    */
    TUint iMaxCount;

    /**
    The memory allocated by this object for storing pointers to the page tables
    it has pinned.
    */
    TPte** iAllocatedPinnedPageTables;

    enum
        {
        KSmallPinnedPageTableCount = 2 ///< Number of entries in iSmallPinnedPageTablesArray
        };

    /**
    A small array to use for storing pinned page tables.
    This is an optimisation used for the typical case of pinning a small number of pages
    to avoid dynamic allocation of memory.
    */
    TPte* iSmallPinnedPageTablesArray[KSmallPinnedPageTableCount];
    };

#endif