|
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

/**
 @file
 @internalComponent
*/
|
20 |
|
21 #ifndef MMAPPING_H |
|
22 #define MMAPPING_H |
|
23 |
|
24 #include "mrefcntobj.h" |
|
25 #include "mmappinglist.h" |
|
26 #include "mpagearray.h" |
|
27 |
|
28 |
|
29 |
|
/**
Base class for memory mappings.

This provides the methods for linking a mapping to a memory object
as well as the interface for updating the MMU page tables associated
with a mapping when the memory state changes.
*/
class DMemoryMappingBase : public DReferenceCountedObject
	{
private:
	/**
	Memory object to which this mapping is currently attached.
	Updates to this are protected by the MmuLock.
	*/
	DMemoryObject* iMemory;

public:
	/**
	Link used to maintain list of mappings attached to a memory object.
	*/
	TMappingListLink iLink;

	/**
	Offset, in page units, within the memory object's memory for start of this mapping.
	*/
	TUint iStartIndex;

	/**
	Size of this mapping, in page units.
	*/
	TUint iSizeInPages;

private:
	/**
	Instance count which is incremented every time a mapping is attached to a memory object.
	When code is manipulating mappings, the instance count is used to detect that a
	mapping has been reused and that the operation it is performing is no longer needed.
	*/
	TUint iMapInstanceCount;

public:

	/**
	Bit flags stored in #Flags giving various state and attributes of the mapping.
	*/
	enum TFlags
		{
		/**
		Flag set during object construction to indicate that this mapping is of
		class #DCoarseMapping.
		*/
		ECoarseMapping = 1<<0,

		/**
		Flag set during object construction to indicate that this mapping will pin
		any memory pages it maps. This may not be used with coarse memory mappings.
		*/
		EPinned = 1<<1,

		/**
		Pages have already been reserved for pinning, so when this mapping is attached
		to a memory object no additional pages need to be reserved. Pre-reserving pages
		is used to prevent the possibility of failing to pin due to an out of memory
		condition. It is essential that the users of these mappings ensure that there
		are enough reserved pages in the paging pool to meet the maximum mapping size
		used.
		*/
		EPinningPagesReserved = 1<<2,

		/**
		Pages have been successfully pinned by this mapping. This is set after demand
		paged memory has been successfully pinned and is used to indicate that the pages
		need unpinning again when the mapping is later unmapped.
		*/
		EPagesPinned = 1<<3,

		/**
		Flag set during object construction to indicate that MMU page tables are to
		be permanently allocated for use by this mapping. Normally, page tables are
		allocated as needed to map memory which can result in out-of-memory errors
		when mapping memory pages.
		*/
		EPermanentPageTables = 1<<4,

		/**
		Permanent page tables have been successfully allocated for this mapping.
		This flag is used to track allocation so they can be released when the mapping
		is destroyed.
		*/
		EPageTablesAllocated = 1<<5,

		/**
		For pinned mappings (EPinned) this flag is set whenever the mapping prevents
		any pages of memory from being fully decommitted from a memory object. When a
		mapping is finally unmapped from the memory object this flag is checked, and,
		if set, further cleanup of the decommitted pages is triggered.
		*/
		EPageUnmapVetoed = 1<<6,

		/**
		Mapping is being, or has been, detached from a memory object.
		When set, operations on the mapping should act as though the mapping is no
		longer attached to a memory object. Specifically, no further pages of memory
		should be mapped into this mapping.

		This flag is only set when the MmuLock is held.
		*/
		EDetaching = 1<<7,

		/**
		This mapping is a physical pinning mapping. The pages it pins
		cannot be paged out or moved.

		This flag is set when DPhysicalPinMapping objects are created.
		*/
		EPhysicalPinningMapping = 1<<8,

		/**
		Flag set during object construction to indicate that this mapping is of
		class #DLargeMapping.

		Note that #DLargeMapping is derived from #DCoarseMapping, therefore presence of this flag
		implies presence of #ECoarseMapping as well.
		*/
		ELargeMapping = 1<<9,
		};

	/**
	Bitmask of values from enum #TPteType which will be used to calculate
	the correct attributes for any page table entries this mapping uses.
	*/
	FORCE_INLINE TUint8& PteType()
		{ return iLink.iSpare1; }

	/**
	Bitmask of values from enum #TFlags.
	The flags occupy 16 bits and are stored in iLink.iSpare2 and iLink.iSpare3.
	*/
	FORCE_INLINE TUint16& Flags()
		{ return (TUint16&)iLink.iSpare2; }

public:
	/**
	Return the memory object to which this mapping is currently attached.

	@pre MmuLock is held. (If aNoCheck==false)
	*/
	FORCE_INLINE DMemoryObject* Memory(TBool aNoCheck=false)
		{
		if(!aNoCheck)
			__NK_ASSERT_DEBUG(MmuLock::IsHeld());
		return iMemory;
		}

	/**
	Return true if the mapping is currently attached to a memory object.
	*/
	FORCE_INLINE TBool IsAttached()
		{ return iLink.IsLinked(); }

	/**
	Return true if the mapping is being, or has been, detached from a memory object.
	The mapping may or may not still be attached to a memory object, i.e. #IsAttached
	is indeterminate.
	*/
	FORCE_INLINE TBool BeingDetached()
		{ return Flags()&EDetaching; }

	/**
	Return the mapping instance count.
	@see #iMapInstanceCount.
	*/
	FORCE_INLINE TUint MapInstanceCount()
		{ return iMapInstanceCount; }

	/**
	Return true if this mapping provides read only access to memory.
	*/
	FORCE_INLINE TBool IsReadOnly()
		{ return !(PteType()&EPteTypeWritable); }

#ifdef MMU_SUPPORTS_EXECUTE_NEVER
	/**
	Return true if this mapping provides access to memory which allows
	code to be executed from it.
	*/
	FORCE_INLINE TBool IsExecutable()
		{ return (PteType()&EPteTypeExecutable); }
#endif

	/**
	Return true if this is a coarse mapping, in other words it is an instance of #DCoarseMapping or
	#DLargeMapping.
	*/
	FORCE_INLINE TBool IsCoarse()
		{ return Flags()&ECoarseMapping; }

	/**
	Return true if this mapping is a large mapping, in other words an instance of #DLargeMapping.

	Note that all large mappings are also coarse mappings.
	*/
	FORCE_INLINE TBool IsLarge()
		{ return Flags()&ELargeMapping; }

	/**
	Return true if this mapping pins the memory it maps.
	*/
	FORCE_INLINE TBool IsPinned()
		{ return Flags()&EPinned; }

	/**
	Return true if this mapping physically pins the memory it maps.
	*/
	FORCE_INLINE TBool IsPhysicalPinning()
		{ return Flags()&EPhysicalPinningMapping; }

	/**
	Return the access permissions which this mapping uses to map memory.
	*/
	FORCE_INLINE TMappingPermissions Permissions()
		{ return Mmu::PermissionsFromPteType(PteType()); }

	/**
	Link this mapping to a memory object.

	This is called by the memory object during processing of #Attach.

	@param aMemory      The memory object the mapping is being attached to.
	@param aMappingList The list to add this mapping to.

	@pre MmuLock is held.
	@pre Mapping list lock is held.
	*/
	void LinkToMemory(DMemoryObject* aMemory, TMappingList& aMappingList);

	/**
	Unlink this mapping from the memory object it was previously linked to with
	#LinkToMemory.

	This is called by the memory object during processing of #Detach.

	@param aMappingList The list that the mapping appears on.
	*/
	void UnlinkFromMemory(TMappingList& aMappingList);

protected:
	/**
	@param aType Initial value for #Flags.
	*/
	DMemoryMappingBase(TUint aType);

	/**
	Attach this mapping to a memory object so that it maps a specified region of its memory.

	@param aMemory The memory object.
	@param aIndex  The page index of the first page of memory to be mapped by the mapping.
	@param aCount  The number of pages of memory to be mapped by the mapping.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Attach(DMemoryObject* aMemory, TUint aIndex, TUint aCount);

	/**
	Remove this mapping from the memory object it was previously attached to by #Attach.
	*/
	void Detach();

public:
	/**
	Update the page table entries corresponding to this mapping to add entries for
	a specified set of memory pages.

	This method is called by DMemoryObject::MapPages to update each mapping attached
	to a memory object whenever new pages of memory are added. However, it won't be
	called for any mapping with the #EPinned attribute as such mappings are unchanging.

	@param aPages An RPageArray::TIter which refers to a range of pages
	              in a memory object. This has been clipped to fit within
	              the range of pages mapped by this mapping.
	              Only array entries which have state RPageArray::ECommitted
	              should be mapped into the mapping's page tables.

	@param aMapInstanceCount The instance of this mapping which is to be updated.
	                         Whenever this no longer matches the current #MapInstanceCount
	                         the function must not update any more of the mapping's
	                         page table entries, (but must still return KErrNone).

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;

	/**
	Update the page table entries corresponding to this mapping to remove entries for
	a specified set of memory pages.

	This method is called by DMemoryObject::UnmapPages to update each mapping attached
	to a memory object whenever pages of memory are removed.

	@param aPages An RPageArray::TIter which refers to a range of pages
	              in a memory object. This has been clipped to fit within
	              the range of pages mapped by this mapping.
	              Only array entries which return true for
	              RPageArray::TargetStateIsDecommitted should be unmapped
	              from the mapping's page tables.

	@param aMapInstanceCount The instance of this mapping which is to be updated.
	                         Whenever this no longer matches the current #MapInstanceCount
	                         the function must not update any more of the mapping's
	                         page table entries.
	*/
	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;

	/**
	Update the page table entry corresponding to this mapping to update an entry for a specified
	page that has just been moved or shadowed.

	@param aPageArray The page array entry of the page in a memory object.
	                  Only array entries which have a target state of
	                  RPageArray::ECommitted should be mapped into the
	                  mapping's page tables.

	@param aIndex The index of the page in the memory object.

	@param aMapInstanceCount The instance of this mapping which is to be updated.
	                         Whenever this no longer matches the current #MapInstanceCount
	                         the function must not update any more of the mapping's
	                         page table entries, (but must still return KErrNone).

	@param aInvalidateTLB Set to ETrue when the TLB entries associated with this page
	                      should be invalidated. This must be done when there is
	                      already a valid pte for this page, i.e. if the page is still
	                      mapped.
	*/
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)=0;

	/**
	Update the page table entries corresponding to this mapping to apply access restrictions
	to a specified set of memory pages.

	This method is called by DMemoryObject::RestrictPages to update each mapping attached
	to a memory object whenever pages of memory are restricted.

	@param aPages An RPageArray::TIter which refers to a range of pages
	              in a memory object. This has been clipped to fit within
	              the range of pages mapped by this mapping.
	              Only array entries which return true for
	              RPageArray::TargetStateIsDecommitted should be unmapped
	              from the mapping's page tables.

	@param aMapInstanceCount The instance of this mapping which is to be updated.
	                         Whenever this no longer matches the current #MapInstanceCount
	                         the function must not update any more of the mapping's
	                         page table entries.
	*/
	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;

	/**
	Update the page table entries corresponding to this mapping to add entries for
	a specified set of demand paged memory pages following a 'page in' or memory
	pinning operation.

	@param aPages An RPageArray::TIter which refers to a range of pages
	              in a memory object. This will be within the range of pages
	              mapped by this mapping.
	              Only array entries which have state RPageArray::ECommitted
	              should be mapped into the mapping's page tables.

	@param aPinArgs The resources required to pin any page tables the mapping uses.
	                Page tables must be pinned if \a aPinArgs.iPinnedPageTables is
	                not the null pointer, in which case the virtual address of each
	                pinned page table must be stored in the array this points to.
	                \a aPinArgs.iReadOnly is true if write access permissions
	                are not needed.

	@param aMapInstanceCount The instance of this mapping which is to be updated.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount) =0;


	/**
	Update the page table entry corresponding to this mapping to add an entry for
	a specified page which is in the process of being moved.

	@param aPageArrayPtr The page array entry for the page to be mapped which must be
	                     within this mapping's range of pages.
	                     Only array entries which have a target state of
	                     RPageArray::ECommitted should be mapped into the mapping's
	                     page tables.

	@param aIndex The index of the page.

	@return ETrue if successful, EFalse otherwise.
	*/
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)=0;


	/**
	In debug builds, dump information about this mapping to the kernel trace port.
	*/
	virtual void Dump();

private:
	/**
	Update this mapping's MMU data structures to map all pages of memory
	currently committed to the memory object (#iMemory) in the region covered
	by this mapping.

	This method is called by #Attach after the mapping has been linked
	into the memory object.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	virtual TInt DoMap() =0;

	/**
	Update this mapping's MMU data structures to unmap all pages of memory.

	This method is called by #Detach before the mapping has been unlinked
	from the memory object but after the #EDetaching flag has been set.
	*/
	virtual void DoUnmap() =0;

protected:
	/**
	For pinned mappings, this virtual method is called by #Attach in order to pin
	pages of memory if required. This is called after the mapping has been linked
	into the memory object but before #DoMap.

	The default implementation of this method simply calls DMemoryManager::Pin.

	@param aPinArgs The resources to use for pinning. This has sufficient replacement
	                pages allocated to pin every page the mapping covers, and the
	                value of \a aPinArgs.iReadOnly has been set to correspond to the
	                mapping's access permissions.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	virtual TInt DoPin(TPinArgs& aPinArgs);

	/**
	For pinned mappings, this virtual method is called by #Detach in order to unpin
	pages of memory if required. This is called before the mapping has been unlinked
	from the memory object but after #DoUnmap.

	The default implementation of this method simply calls DMemoryManager::Unpin.

	@param aPinArgs The resources used for pinning. The replacement pages allocated
	                to this will be increased for each page which became completely
	                unpinned.
	*/
	virtual void DoUnpin(TPinArgs& aPinArgs);
	};
|
483 |
|
484 |
|
485 |
|
/**
Base class for memory mappings which map memory contents into an address space.

This provides methods for allocating virtual memory and holds the attributes needed
for MMU page table entries.
*/
class DMemoryMapping : public DMemoryMappingBase
	{
protected:
	/**
	The page directory entry (PDE) value for use when mapping this mapping's page tables.
	This value has the physical address component being zero, so a page table's physical
	address can be simply ORed in.

	This could potentially be removed (see DMemoryMapping::PdeType()).
	*/
	TPde iBlankPde;

	/**
	The page table entry (PTE) value for use when mapping pages into this mapping.
	This value has the physical address component being zero, so a page's physical
	address can be simply ORed in.
	*/
	TPte iBlankPte;

	/**
	Start of the virtual address region allocated for use by this mapping
	ORed with the OS ASID of the address space this lies in.

	Note, the address at which memory is mapped (#iLinAddrAndOsAsid) may be different
	to this allocated address due to page colouring restrictions.

	@see iAllocatedSize
	*/
	TLinAddr iAllocatedLinAddrAndOsAsid;

	/**
	Size of virtual address region memory allocated for use by this mapping.

	@see iAllocatedLinAddrAndOsAsid
	*/
	TUint iAllocatedSize;

private:
	/**
	Start of the virtual address region that this mapping is currently
	mapping memory at, ORed with the OS ASID of the address space this lies in.

	This value is set by #Map which is called from #Attach when the mapping
	is attached to a memory object. The address used may be different to
	#iAllocatedLinAddrAndOsAsid due to page colouring restrictions.

	The size of the region mapped is #iSizeInPages.

	Note, access to this value is through #Base() and #OsAsid().
	*/
	TLinAddr iLinAddrAndOsAsid;

public:
	/**
	Second phase constructor.

	The main function of this is to allocate a virtual address region for the mapping
	and to add it to an address space.

	@param aAttributes The attributes of the memory which this mapping is intended to map.
	                   This is only needed to setup #PdeType which is required for correct
	                   virtual address allocation so in practice the only relevant attribute
	                   is to set EMemoryAttributeUseECC if required, else use
	                   EMemoryAttributeStandard.

	@param aFlags A combination of the options from enum TMappingCreateFlags.

	@param aOsAsid The OS ASID of the address space the mapping is to be added to.

	@param aAddr The virtual address to use for the mapping, or zero if this is
	             to be allocated by this function.

	@param aSize The maximum size of memory, in bytes, this mapping will be used to
	             map. This determines the size of the virtual address region the
	             mapping will use.

	@param aColourOffset The byte offset within a memory object's memory which this mapping
	                     is to start. This is used to adjust virtual memory allocation to
	                     meet page colouring restrictions. If this value is not known leave
	                     this argument unspecified; however, it must be specified if \a aAddr
	                     is specified.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Construct(TMemoryAttributes aAttributes, TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset=~(TLinAddr)0);

	/**
	Add this mapping to a memory object so that it maps a specified region of its memory.

	Most of the action of this method is performed by #Attach.

	@param aMemory      The memory object.
	@param aIndex       The page index of the first page of memory to be mapped by the mapping.
	@param aCount       The number of pages of memory to be mapped by the mapping.
	@param aPermissions The memory access permissions to apply to the mapping.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Map(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);

	/**
	Remove this mapping from the memory object it was previously added to by #Map.

	Most of the action of this method is performed by #Detach.
	*/
	void Unmap();

	/**
	Return the OS ASID for the address space that this mapping is currently mapping memory in.
	*/
	FORCE_INLINE TInt OsAsid()
		{
		__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
		return iLinAddrAndOsAsid&KPageMask;
		}

	/**
	Return starting virtual address that this mapping is currently mapping memory at.
	The size of the region mapped is #iSizeInPages.
	*/
	FORCE_INLINE TLinAddr Base()
		{
		__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
		return iLinAddrAndOsAsid&~KPageMask;
		}

	/**
	Return #Base()|#OsAsid()
	*/
	FORCE_INLINE TLinAddr LinAddrAndOsAsid()
		{
		__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
		return iLinAddrAndOsAsid;
		}

	/**
	Return true if this mapping is user accessible but not global,
	i.e. it maps memory into a user process's address space.
	*/
	FORCE_INLINE TBool IsUserMapping()
		{
		// Note: must be usable before the mapping has been added to an address space
		return (PteType() & (EPteTypeUserAccess|EPteTypeGlobal)) == EPteTypeUserAccess;
		}

	/**
	Return #iBlankPde.
	*/
	FORCE_INLINE TPde BlankPde()
		{
		return iBlankPde;
		}

	/**
	Emit BTrace traces identifying this mapping's virtual address usage.
	*/
	void BTraceCreate();

	/**
	In debug builds, dump information about this mapping to the kernel trace port.
	*/
	virtual void Dump();

	/**
	Function to return a page table pointer for the specified linear address and
	index to this mapping.

	This is called by #Epoc::MovePhysicalPage when moving page table or page table info pages.

	@param aLinAddr     The linear address to find the page table entry for.
	@param aMemoryIndex The memory object index of the page to find the page
	                    table entry for.

	@return A pointer to the page table entry; if the page table entry couldn't
	        be found this will be NULL.
	*/
	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)=0;

protected:
	/**
	@param aType Initial value for #Flags.
	*/
	DMemoryMapping(TUint aType);

	/**
	This destructor removes the mapping from any address space it was added to and
	frees any virtual addresses allocated to it.
	*/
	~DMemoryMapping();

	/**
	Allocate virtual addresses for this mapping to use.
	This is called from #Construct and the arguments to this function are the same.

	On success, iAllocatedLinAddrAndOsAsid and iAllocatedSize will be initialised.
	*/
	virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);

	/**
	Free the virtual addresses allocated to this mapping with AllocateVirtualMemory.
	*/
	virtual void FreeVirtualMemory();
	};
|
691 |
|
692 |
|
693 |
|
/**
A memory mapping to map a 'chunk' aligned region of a DCoarseMemory object into
an address space. A 'chunk' is the size of memory mapped by a whole MMU page table
and is #KChunkSize bytes.

These mappings make use of page tables owned by a DCoarseMemory and when
they are attached to a memory object they are linked into
DCoarseMemory::DPageTables::iMappings not DCoarseMemory::iMappings.
*/
class DCoarseMapping : public DMemoryMapping
	{
public:
	DCoarseMapping();
	~DCoarseMapping();

protected:
	/**
	@param aFlags Initial value for #Flags.
	*/
	DCoarseMapping(TUint aFlags);

protected:
	// from DMemoryMappingBase...
	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
	virtual TInt DoMap();
	virtual void DoUnmap();

	// from DMemoryMapping...
	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);
	};
|
726 |
|
727 |
|
728 |
|
729 /** |
|
730 A memory mapping to map a page aligned region of a memory object into |
|
an address space. This may be used with any memory object: DFineMemory or DCoarseMemory.
|
732 */ |
|
733 class DFineMapping : public DMemoryMapping |
|
734 { |
|
735 public: |
|
736 DFineMapping(); |
|
737 ~DFineMapping(); |
|
738 |
|
739 private: |
|
740 // from DMemoryMappingBase... |
|
741 virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); |
|
742 virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); |
|
743 virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); |
|
744 virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); |
|
745 virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount); |
|
746 virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex); |
|
747 virtual TInt DoMap(); |
|
748 virtual void DoUnmap(); |
|
749 |
|
750 // from DMemoryMapping... |
|
751 |
|
752 /** |
|
753 Allocatate virtual addresses for this mapping to use. |
|
754 |
|
755 In addition to performing the action of DMemoryMapping::AllocateVirtualMemory |
|
756 this will also allocate all permanent page tables for the mapping if it has attribute |
|
757 #EPermanentPageTables. |
|
758 */ |
|
759 virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset); |
|
760 |
|
761 /** |
|
	Free the virtual addresses and permanent page tables allocated to this mapping with
	AllocateVirtualMemory.
	*/
	virtual void FreeVirtualMemory();

	/**
	Locate the page table mapping a specified virtual address.

	@param aLinAddr      A virtual address in the region allocated to this mapping.
	@param aMemoryIndex  Page index, presumably the offset within the memory object
	                     corresponding to \a aLinAddr — TODO confirm against implementation.

	@return The virtual address of the page table, or the null pointer if none was found.
	*/
	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);

	// new...

	/**
	Allocate all the page tables required for this mapping. This is called by
	AllocateVirtualMemory if the #EPermanentPageTables attribute is set.

	Each page table for the virtual address region used by the mapping is
	allocated if not already present. The permanence count of any page table
	(SPageTableInfo::iPermanenceCount) is then incremented so that it is not
	freed even when it no longer maps any pages.

	If successful, the #EPageTablesAllocated flag in #Flags will be set.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt AllocatePermanentPageTables();

	/**
	Free all permanent page tables allocated to this mapping.

	This reverses the action of #AllocatePermanentPageTables by decrementing
	the permanence count for each page table and freeing it if it is no longer in use.
	*/
	void FreePermanentPageTables();

	/**
	Free a range of permanent page tables.

	This is an implementation factor for FreePermanentPageTables and
	AllocatePermanentPageTables. It decrements the permanence count
	for each page table and frees it if it is no longer in use.

	@param aFirstPde	The address of the page directory entry which refers to
						the first page table to be freed.
	@param aLastPde		The address of the page directory entry which refers to
						the last page table to be freed.
	*/
	void FreePermanentPageTables(TPde* aFirstPde, TPde* aLastPde);

#ifdef _DEBUG
	/**
	Check that the contents of the page table are valid. Debug builds only.

	@param aPt		The page table to validate.
	@param aAddr	The virtual address mapped by the page table — presumably used to
					cross-check the expected contents; TODO confirm.
	*/
	void ValidatePageTable(TPte* aPt, TLinAddr aAddr);
#endif

	/**
	Get the page table being used to map a specified virtual address if it exists.

	@param aAddr A virtual address in the region allocated to this mapping.

	@return The virtual address of the page table mapping \a aAddr,
			or the null pointer if one wasn't found.
	*/
	TPte* GetPageTable(TLinAddr aAddr);

	/**
	Get the page table being used to map a specified virtual address; allocating
	a new one if it didn't previously exist.

	@param aAddr A virtual address in the region allocated to this mapping.

	@return The virtual address of the page table mapping \a aAddr,
			or the null pointer if one wasn't found and couldn't be allocated.
	*/
	TPte* GetOrAllocatePageTable(TLinAddr aAddr);

	/**
	Get and pin the page table being used to map a specified virtual address;
	allocating a new one if it didn't previously exist.

	@param aAddr	A virtual address in the region allocated to this mapping.
	@param aPinArgs	The resources required to pin the page table.
					On success, the page table will have been appended to
					\a aPinArgs.iPinnedPageTables.

	@return The virtual address of the page table mapping \a aAddr,
			or the null pointer if one wasn't found and couldn't be allocated.
	*/
	TPte* GetOrAllocatePageTable(TLinAddr aAddr, TPinArgs& aPinArgs);

	/**
	Allocate a single page table.

	@param aAddr		The virtual address the page table will be used to map.
	@param aPdeAddress	Address of the page directory entry which is to map
						the newly allocated page table.
	@param aPermanent	True, if the page table's permanence count is to be incremented.

	@return The virtual address of the page table if it was successfully allocated,
			otherwise the null pointer.
	*/
	TPte* AllocatePageTable(TLinAddr aAddr, TPde* aPdeAddress, TBool aPermanent=false);

	/**
	Free a single page table if it is unused.

	@param aPdeAddress	Address of the page directory entry (PDE) which maps the page table.
						If the page table is freed, this PDE will be set to an 'unallocated' value.
	*/
	void FreePageTable(TPde* aPdeAddress);
	};
|
873 |
|
874 |
|
875 |
|
/**
A mapping which provides access to the physical addresses used by a memory object
without mapping these at any virtual address accessible to software.

These mappings are always of the 'pinned' type to prevent the obtained physical addresses
from becoming invalid.
*/
class DPhysicalPinMapping : public DMemoryMappingBase
	{
public:
	DPhysicalPinMapping();

	/**
	Attach this mapping to a memory object so that it pins a specified region of its memory.

	Most of the action of this method is performed by #Attach.

	@param aMemory		The memory object.
	@param aIndex		The page index of the first page of memory to be pinned by the mapping.
	@param aCount		The number of pages of memory to be pinned by the mapping.
	@param aPermissions	The memory access permissions appropriate to the intended use
						of the physical addresses. E.g. if the memory contents will be
						changed, use EReadWrite. These permissions are used for error
						checking, e.g. detecting attempted writes to read-only memory.
						They are also used for optimising access to demand paged memory;
						which is more efficient if only read-only access is required.

	@return KErrNone if successful,
			KErrNotFound if any part of the memory to be pinned was not present,
			KErrNoMemory if there was insufficient memory,
			otherwise one of the system wide error codes.
	*/
	TInt Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);

	/**
	Remove this mapping from the memory object it was previously added to by #Pin.

	Most of the action of this method is performed by #Detach.
	*/
	virtual void Unpin();

	/**
	Get the physical address(es) for a region of pages in this mapping.

	@param aIndex				Page index, within the mapping, for start of the region.
	@param aCount				Number of pages in the region.
	@param aPhysicalAddress		On success, this value is set to one of two values.
								If the specified region is physically contiguous,
								the value is the physical address of the first page
								in the region. If the region is discontiguous, the
								value is set to KPhysAddrInvalid.
	@param aPhysicalPageList	If not zero, this points to an array of TPhysAddr
								objects. On success, this array will be filled
								with the addresses of the physical pages which
								contain the specified region. If aPhysicalPageList is
								zero, then the function will fail with
								KErrNotFound if the specified region is not
								physically contiguous.

	@return 0 if successful and the whole region is physically contiguous.
			1 if successful but the region isn't physically contiguous.
			KErrNotFound, if any page in the region is not present,
			otherwise one of the system wide error codes.

	@pre This mapping must have been attached to a memory object with #Pin.
	*/
	TInt PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList);
private:
	// from DMemoryMappingBase...
	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount); ///< Does nothing
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);///< Not implemented. Faults in debug builds.
	virtual TInt DoMap(); ///< Does nothing
	virtual void DoUnmap(); ///< Does nothing
	};
|
954 |
|
955 |
|
956 |
|
/**
A mapping which pins memory in order to prevent demand paging related
page faults from occurring.
*/
class DVirtualPinMapping : public DPhysicalPinMapping
	{
public:
	DVirtualPinMapping();
	~DVirtualPinMapping();

	/**
	Create a new DVirtualPinMapping object suitable for pinning a specified number of pages.

	If no maximum is specified (\a aMaxCount==0) then this object may be used to pin
	any number of pages, however this will require dynamic allocation of storage for
	page table references.

	@param aMaxCount The maximum number of pages which can be pinned, or zero for no maximum.

	@return The newly created DVirtualPinMapping or the null pointer if there was
			insufficient memory.
	*/
	static DVirtualPinMapping* New(TUint aMaxCount);

	/**
	Attach this mapping to a memory object so that it pins a specified region of its memory.

	Additionally, pin the page tables in a specified mapping (\a aMapping) which
	are being used to map these pages.

	The result of this function is that access to the pinned memory through the virtual
	addresses used by \a aMapping will not generate any demand paging related page faults.

	@param aMemory		The memory object.
	@param aIndex		The page index of the first page of memory to be pinned by the mapping.
	@param aCount		The number of pages of memory to be pinned by the mapping.
	@param aPermissions	The memory access permissions appropriate to the intended use
						of the physical addresses. E.g. if the memory contents will be
						changed, use EReadWrite. These permissions are used for error
						checking, e.g. detecting attempted writes to read-only memory.
						They are also used for optimising access to demand paged memory;
						which is more efficient if only read-only access is required.
	@param aMapping		The mapping whose page tables are to be pinned. This must be
						currently mapping the specified region of memory pages.
	@param aMapInstanceCount	The instance count of the mapping whose page tables are to be pinned.

	@return KErrNone if successful,
			KErrNotFound if any part of the memory to be pinned was not present,
			KErrNoMemory if there was insufficient memory,
			otherwise one of the system wide error codes.
	*/
	TInt Pin(	DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions,
				DMemoryMappingBase* aMapping, TUint aMapInstanceCount);

	/**
	Remove this mapping from the memory object it was previously added to by #Pin.
	This will unpin any memory pages and page tables that were pinned.
	*/
	void Unpin();

	/**
	Return the maximum number of page tables which could be required to map
	\a aPageCount pages. This is used by various resource reserving calculations.
	*/
	static TUint MaxPageTables(TUint aPageCount);

	/**
	In debug builds, dump information about this mapping to the kernel trace port.
	*/
	virtual void Dump();

private:
	// from DMemoryMappingBase...
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Does nothing.
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);///< Not implemented. Faults in debug builds.
	virtual TInt DoPin(TPinArgs& aPinArgs);
	virtual void DoUnpin(TPinArgs& aPinArgs);

private:
	/**
	Allocate memory to store pointers to all the page tables which map
	\a aCount pages of memory. The pointer to the allocated memory
	is stored at iAllocatedPinnedPageTables.

	If iSmallPinnedPageTablesArray is large enough, this function doesn't
	allocate any memory.

	@return KErrNone if successful, otherwise KErrNoMemory.
	*/
	TInt AllocPageTableArray(TUint aCount);

	/**
	Delete iAllocatedPinnedPageTables.
	*/
	void FreePageTableArray();

	/**
	Return the address of the array storing pinned page tables.
	This is either iSmallPinnedPageTablesArray or iAllocatedPinnedPageTables.
	*/
	TPte** PageTableArray();

	/**
	Unpin all the page tables which have been pinned by this mapping.

	@param aPinArgs	The resources used for pinning. The replacement pages allocated
					to this will be increased for each page which became completely
					unpinned.
	*/
	void UnpinPageTables(TPinArgs& aPinArgs);
private:
	/**
	Temporary store for the mapping passed to #Pin
	*/
	DMemoryMappingBase* iPinVirtualMapping;

	/**
	Temporary store for the mapping instance count passed to #Pin
	*/
	TUint iPinVirtualMapInstanceCount;

	/**
	The number of page tables which are currently being pinned by this mapping.
	This is the number of valid entries stored at PageTableArray.
	*/
	TUint iNumPinnedPageTables;

	/**
	The maximum number of pages which can be pinned by this mapping.
	If this is zero, there is no maximum.
	*/
	TUint iMaxCount;

	/**
	The memory allocated by this object for storing pointers to the page tables
	it has pinned.
	*/
	TPte** iAllocatedPinnedPageTables;

	enum
		{
		KSmallPinnedPageTableCount = 2 ///< Number of entries in iSmallPinnedPageTablesArray
		};

	/**
	A small array to use for storing pinned page tables.
	This is an optimisation used for the typical case of pinning a small number of pages
	to avoid dynamic allocation of memory.
	*/
	TPte* iSmallPinnedPageTablesArray[KSmallPinnedPageTableCount];
	};
|
1109 |
|
1110 #endif |