|
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//
|
#include <plat_priv.h>
#include "mm.h"
#include "mmu.h"

#include "mobject.h"
#include "mmapping.h"
#include "mptalloc.h"
#include "mmanager.h"
#include "cache_maintenance.inl"
|
const TUint KMaxMappingsInOneGo = KMaxPageInfoUpdatesInOneGo; // must be power-of-2
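// (KMaxMappingsInOneGo bounds how many mappings are processed between MmuLock
// 'flashes' - see the MmuLock::Flash(flash,KMaxMappingsInOneGo) calls below;
// the power-of-2 requirement presumably lets the flash check reduce to a
// cheap mask test on the running counter.)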
|


//
// MemoryObjectLock
//

/**
The mutex pool used to assign locks to memory objects.
@see #MemoryObjectLock.
*/
DMutexPool MemoryObjectMutexPool;
|
void MemoryObjectLock::Lock(DMemoryObject* aMemory)
	{
	TRACE2(("MemoryObjectLock::Lock(0x%08x) try",aMemory));
	MemoryObjectMutexPool.Wait(aMemory->iLock);
	TRACE2(("MemoryObjectLock::Lock(0x%08x) acquired",aMemory));
	}

void MemoryObjectLock::Unlock(DMemoryObject* aMemory)
	{
	TRACE2(("MemoryObjectLock::Unlock(0x%08x)",aMemory));
	MemoryObjectMutexPool.Signal(aMemory->iLock);
	}

TBool MemoryObjectLock::IsHeld(DMemoryObject* aMemory)
	{
	return MemoryObjectMutexPool.IsHeld(aMemory->iLock);
	}


//
// DMemoryObject
//
|
DMemoryObject::DMemoryObject(DMemoryManager* aManager, TUint aFlags, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: iManager(aManager), iFlags(aFlags), iAttributes(Mmu::CanonicalMemoryAttributes(aAttributes)),
	  iSizeInPages(aSizeInPages)
	{
	__ASSERT_COMPILE(EMemoryAttributeMask<0x100); // make sure aAttributes fits into a TUint8

	TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);
	iRamAllocFlags = type;
	if(aCreateFlags&EMemoryCreateNoWipe)
		iRamAllocFlags |= Mmu::EAllocNoWipe;
	else if(aCreateFlags&EMemoryCreateUseCustomWipeByte)
		{
		TUint8 wipeByte = (aCreateFlags>>EMemoryCreateWipeByteShift)&0xff;
		iRamAllocFlags |= wipeByte<<Mmu::EAllocWipeByteShift;
		iRamAllocFlags |= Mmu::EAllocUseCustomWipeByte;
		}
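	// (Illustrative: with EMemoryCreateUseCustomWipeByte and a wipe byte of
	// 0xAA, the value 0xAA is encoded into iRamAllocFlags at
	// Mmu::EAllocWipeByteShift so the RAM allocator wipes freshly committed
	// pages with that byte.)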
|
	if(aCreateFlags&EMemoryCreateDemandPaged)
		iFlags |= EDemandPaged;
	if(aCreateFlags&EMemoryCreateReserveAllResources)
		iFlags |= EReserveResources;
	if(aCreateFlags&EMemoryCreateDisallowPinning)
		iFlags |= EDenyPinning;
	if(aCreateFlags&EMemoryCreateReadOnly)
		iFlags |= EDenyWriteMappings;
	if(!(aCreateFlags&EMemoryCreateAllowExecution))
		iFlags |= EDenyExecuteMappings;
	}
|

TInt DMemoryObject::Construct()
	{
	TBool preAllocateMemory = iFlags&(EReserveResources|EDemandPaged);
	TInt r = iPages.Construct(iSizeInPages,preAllocateMemory);
	return r;
	}


DMemoryObject::~DMemoryObject()
	{
	TRACE(("DMemoryObject[0x%08x]::~DMemoryObject()",this));
	__NK_ASSERT_DEBUG(iMappings.IsEmpty());
	}


TBool DMemoryObject::CheckRegion(TUint aIndex, TUint aCount)
	{
	TUint end = aIndex+aCount;
	return end>=aIndex && end<=iSizeInPages;
	}
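// (CheckRegion above: the end>=aIndex test catches 32-bit wrap-around - e.g.
// aIndex=0xFFFFFFF0 with aCount=0x20 gives end=0x10, which is below aIndex,
// so the region is rejected.)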
|

void DMemoryObject::ClipRegion(TUint& aIndex, TUint& aCount)
	{
	TUint end = aIndex+aCount;
	if(end<aIndex) // overflow?
		end = ~0u;
	if(end>iSizeInPages)
		end = iSizeInPages;
	if(aIndex>=end)
		aIndex = end;
	aCount = end-aIndex;
	}


void DMemoryObject::SetLock(DMutex* aLock)
	{
	__NK_ASSERT_DEBUG(!iLock);
	iLock = aLock;
	TRACE(("MemoryObject[0x%08x]::SetLock(0x%08x) \"%O\"",this,aLock,aLock));
	}


DMemoryMapping* DMemoryObject::CreateMapping(TUint, TUint)
	{
	return new DFineMapping();
	}
|

TInt DMemoryObject::MapPages(RPageArray::TIter aPages)
	{
	TRACE2(("DMemoryObject[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

	TUint offset = aPages.Index();
	TUint offsetEnd = aPages.IndexEnd();
	TInt r = KErrNone;

	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		if(mapping->IsPinned())
			{
			// pinned mappings don't change, so nothing to do...
			iMappings.Unlock();
			}
		else
			{
			// get region where pages overlap the mapping...
			TUint start = mapping->iStartIndex;
			TUint end = start+mapping->iSizeInPages;
			if(start<offset)
				start = offset;
			if(end>offsetEnd)
				end = offsetEnd;
			if(start>=end)
				{
				// the mapping doesn't contain the pages...
				iMappings.Unlock();
				}
			else
				{
				// map pages in the mapping...
				mapping->Open();
				TUint mapInstanceCount = mapping->MapInstanceCount();
				iMappings.Unlock();
				r = mapping->MapPages(aPages.Slice(start,end),mapInstanceCount);
				mapping->AsyncClose();
				if(r!=KErrNone)
					{
					iMappings.Lock();
					break;
					}
				}
			}
		iMappings.Lock();
		mapping = iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();

	return r;
	}
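// (The pattern above - Open() to take a reference, snapshot MapInstanceCount(),
// drop the mappings list lock around the potentially slow per-mapping call,
// then AsyncClose() - recurs in UnmapPages and RestrictPages below; it bounds
// how long the list lock is held while still working on a live mapping.)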
|

void DMemoryObject::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TRACE2(("DMemoryObject[0x%08x]::RemapPage(0x%x,%d,%d)",this,aPageArray,aIndex,aInvalidateTLB));

	iMappings.RemapPage(aPageArray, aIndex, aInvalidateTLB);

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	if (aInvalidateTLB)
		InvalidateTLB();
#endif
	}
|

void DMemoryObject::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	TRACE2(("DMemoryObject[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));

	TUint offset = aPages.Index();
	TUint offsetEnd = aPages.IndexEnd();
	if(offset==offsetEnd)
		return;

	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		// get region where pages overlap the mapping...
		TUint start = mapping->iStartIndex;
		TUint end = start+mapping->iSizeInPages;
		if(start<offset)
			start = offset;
		if(end>offsetEnd)
			end = offsetEnd;
		if(start>=end)
			{
			// the mapping doesn't contain the pages...
			iMappings.Unlock();
			}
		else
			{
			RPageArray::TIter pages = aPages.Slice(start,end);
			if(mapping->IsPinned())
				{
				// pinned mappings veto page unmapping...
				if(aDecommitting)
					__e32_atomic_ior_ord8(&mapping->Flags(), (TUint8)DMemoryMapping::EPageUnmapVetoed);
				iMappings.Unlock();
				TRACE2(("DFineMemoryMapping[0x%08x] veto UnmapPages, index=0x%x count=0x%x",mapping,pages.Index(),pages.Count()));
				pages.VetoUnmap();
				}
			else
				{
				// unmap pages in the mapping...
				mapping->Open();
				TUint mapInstanceCount = mapping->MapInstanceCount();
				iMappings.Unlock();
				mapping->UnmapPages(pages,mapInstanceCount);
				mapping->AsyncClose();
				}
			}
		iMappings.Lock();
		mapping = iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	InvalidateTLB();
#endif
	}
|

void DMemoryObject::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
	{
	TRACE2(("DMemoryObject[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));

	TUint offset = aPages.Index();
	TUint offsetEnd = aPages.IndexEnd();
	if(offset==offsetEnd)
		return;

	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		// get region where pages overlap the mapping...
		TUint start = mapping->iStartIndex;
		TUint end = start+mapping->iSizeInPages;
		if(start<offset)
			start = offset;
		if(end>offsetEnd)
			end = offsetEnd;
		if(start>=end)
			{
			// the mapping doesn't contain the pages...
			iMappings.Unlock();
			}
		else
			{
			RPageArray::TIter pages = aPages.Slice(start,end);
			if(mapping->IsPhysicalPinning() ||
				(!(aRestriction & ERestrictPagesForMovingFlag) && mapping->IsPinned()))
				{
				// Pinned mappings veto page restrictions except for page moving,
				// where only physically pinned mappings block page moving.
				iMappings.Unlock();
				TRACE2(("DFineMemoryMapping[0x%08x] veto RestrictPages, index=0x%x count=0x%x",mapping,pages.Index(),pages.Count()));
				pages.VetoRestrict(aRestriction & ERestrictPagesForMovingFlag);
				// Mappings lock required for iter.Finish() as iter will be removed from the mappings list.
				iMappings.Lock();
				break;
				}
			else
				{
				// pages not pinned, so check whether they need restricting...
				if(aRestriction == ERestrictPagesForMovingFlag)
					{
					// nothing to do when just checking for pinned mappings for
					// page moving purposes and not restricting to NA.
					iMappings.Unlock();
					}
				else
					{
					// restrict pages in the mapping...
					mapping->Open();
					TUint mapInstanceCount = mapping->MapInstanceCount();
					iMappings.Unlock();
					mapping->RestrictPagesNA(pages, mapInstanceCount);
					mapping->AsyncClose();
					}
				}
			}
		iMappings.Lock();
		mapping = iter.Next();
		}

	if(aRestriction & ERestrictPagesForMovingFlag)
		{
		// Clear the mappings added flag so page moving can detect whether any
		// new mappings have been added.
		ClearMappingAddedFlag();
		}

	iter.Finish();
	iMappings.Unlock();

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	// Writable memory objects will have been restricted to no access, so invalidate TLB.
	if (aRestriction != ERestrictPagesForMovingFlag)
		InvalidateTLB();
#endif
	}
|

TInt DMemoryObject::CheckNewMapping(DMemoryMappingBase* aMapping)
	{
	if(iFlags&EDenyPinning && aMapping->IsPinned())
		return KErrAccessDenied;
	if(iFlags&EDenyMappings)
		return KErrAccessDenied;
	if(iFlags&EDenyWriteMappings && !aMapping->IsReadOnly())
		return KErrAccessDenied;
#ifdef MMU_SUPPORTS_EXECUTE_NEVER
	if((iFlags&EDenyExecuteMappings) && aMapping->IsExecutable())
		return KErrAccessDenied;
#endif
	return KErrNone;
	}
|

TInt DMemoryObject::AddMapping(DMemoryMappingBase* aMapping)
	{
	__NK_ASSERT_DEBUG(!aMapping->IsCoarse());

	// check mapping allowed...
	MmuLock::Lock();
	iMappings.Lock();

	TInt r = CheckNewMapping(aMapping);
	if(r == KErrNone)
		{
		Open();
		aMapping->LinkToMemory(this, iMappings);
		}

	iMappings.Unlock();
	MmuLock::Unlock();

	TRACE(("DMemoryObject[0x%08x]::AddMapping(0x%08x) returns %d", this, aMapping, r));

	return r;
	}


void DMemoryObject::RemoveMapping(DMemoryMappingBase* aMapping)
	{
	aMapping->UnlinkFromMemory(iMappings);
	Close();
	}
|

TInt DMemoryObject::SetReadOnly()
	{
	TRACE(("DMemoryObject[0x%08x]::SetReadOnly()",this));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

	TInt r = KErrNone;
	iMappings.Lock();
	if (iFlags & EDenyWriteMappings)
		{
		// The object is already read only.
		iMappings.Unlock();
		return KErrNone;
		}

	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		if (!mapping->IsReadOnly())
			{
			r = KErrInUse;
			goto exit;
			}
		// This will flash iMappings.Lock to stop it being held too long.
		// This is safe as new mappings will be added to the end of the list so we
		// won't miss them.
		mapping = iter.Next();
		}
	// Block any writable mapping from being added to this memory object.
	// Use an atomic operation as iMappings.Lock protects EDenyWriteMappings
	// but not the whole word.
	__e32_atomic_ior_ord8(&iFlags, (TUint8)EDenyWriteMappings);

exit:
	iter.Finish();
	iMappings.Unlock();
	return r;
	}
|

void DMemoryObject::DenyMappings()
	{
	TRACE(("DMemoryObject[0x%08x]::DenyMappings()",this));
	MmuLock::Lock();
	// Use an atomic operation as MmuLock protects EDenyMappings
	// but not the whole word.
	__e32_atomic_ior_ord8(&iFlags, (TUint8)EDenyMappings);
	MmuLock::Unlock();
	}
|

TInt DMemoryObject::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
	{
	TRACE2(("DMemoryObject[0x%08x]::PhysAddr(0x%x,0x%x,?,?)",this,aIndex,aCount));
	TInt r = iPages.PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
	TRACE2(("DMemoryObject[0x%08x]::PhysAddr(0x%x,0x%x,?,?) returns %d aPhysicalAddress=0x%08x",this,aIndex,aCount,r,aPhysicalAddress));
	return r;
	}


void DMemoryObject::BTraceCreate()
	{
	BTraceContext8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectCreate,this,iSizeInPages);
	}


TUint DMemoryObject::PagingManagerData(TUint aIndex)
	{
	TRACE2(("DMemoryObject[0x%08x]::PagingManagerData(0x%x)",this,aIndex));
	__NK_ASSERT_DEBUG(IsDemandPaged());
	TUint value = iPages.PagingManagerData(aIndex);
	TRACE2(("DMemoryObject[0x%08x]::PagingManagerData(0x%x) returns 0x%x",this,aIndex,value));
	return value;
	}


void DMemoryObject::SetPagingManagerData(TUint aIndex, TUint aValue)
	{
	TRACE(("DMemoryObject[0x%08x]::SetPagingManagerData(0x%x,0x%08x)",this,aIndex,aValue));
	__NK_ASSERT_DEBUG(IsDemandPaged());
	iPages.SetPagingManagerData(aIndex, aValue);
	__NK_ASSERT_DEBUG(iPages.PagingManagerData(aIndex)==aValue);
	}
|


//
// DCoarseMemory::DPageTables
//

DCoarseMemory::DPageTables::DPageTables(DCoarseMemory* aMemory, TInt aNumPts, TUint aPteType)
	: iMemory(aMemory), iPteType(aPteType), iPermanenceCount(0), iNumPageTables(aNumPts)
	{
	aMemory->Open();
	iBlankPte = Mmu::BlankPte(aMemory->Attributes(),aPteType);
	}


DCoarseMemory::DPageTables* DCoarseMemory::DPageTables::New(DCoarseMemory* aMemory, TUint aNumPages, TUint aPteType)
	{
	TRACE2(("DCoarseMemory::DPageTables::New(0x%08x,0x%x,0x%08x)",aMemory, aNumPages, aPteType));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
	__NK_ASSERT_DEBUG((aNumPages&(KChunkMask>>KPageShift))==0);
	TUint numPts = aNumPages>>(KChunkShift-KPageShift);
	DPageTables* self = (DPageTables*)Kern::AllocZ(sizeof(DPageTables)+(numPts-1)*sizeof(TPte*));
	if(self)
		{
		new (self) DPageTables(aMemory,numPts,aPteType);
		TInt r = self->Construct();
		if(r!=KErrNone)
			{
			self->Close();
			self = 0;
			}
		}
	TRACE2(("DCoarseMemory::DPageTables::New(0x%08x,0x%x,0x%08x) returns 0x%08x",aMemory, aNumPages, aPteType, self));
	return self;
	}
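// (DPageTables::New above, sizing illustrated for the usual ARM configuration
// of 4KB pages and 1MB chunks: numPts = aNumPages>>8, i.e. one page table per
// 256 pages. The AllocZ over-allocates by (numPts-1) pointers, which suggests
// the trailing iTables[] member is declared with a single element and used as
// a variable-length tail.)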
|

TInt DCoarseMemory::DPageTables::Construct()
	{
	if(iMemory->IsDemandPaged())
		{
		// do nothing, allow pages to be mapped on demand...
		return KErrNone;
		}

	RPageArray::TIter pageIter;
	iMemory->iPages.FindStart(0,iMemory->iSizeInPages,pageIter);

	// map pages...
	TInt r = KErrNone;
	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.Find(pageList);
		if(!n)
			break; // done

		// map some pages...
		r = MapPages(pageList);

		// done with pages...
		pageIter.FindRelease(n);

		if(r!=KErrNone)
			break;
		}

	iMemory->iPages.FindEnd(0,iMemory->iSizeInPages);

	return r;
	}
|

void DCoarseMemory::DPageTables::Close()
	{
	__NK_ASSERT_DEBUG(CheckCloseIsSafe());
	MmuLock::Lock();
	if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) != 1)
		{
		MmuLock::Unlock();
		return;
		}
	DCoarseMemory* memory = iMemory;
	if(memory)
		{
		iMemory->iPageTables[iPteType] = 0;
		iMemory = 0;
		}
	MmuLock::Unlock();
	if(memory)
		memory->Close();
	delete this;
	}


void DCoarseMemory::DPageTables::AsyncClose()
	{
	__NK_ASSERT_DEBUG(CheckAsyncCloseIsSafe());
	MmuLock::Lock();
	if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) != 1)
		{
		MmuLock::Unlock();
		return;
		}
	DCoarseMemory* memory = iMemory;
	if(memory)
		{
		iMemory->iPageTables[iPteType] = 0;
		iMemory = 0;
		}
	MmuLock::Unlock();
	if(memory)
		memory->AsyncClose();
	AsyncDelete();
	}


DCoarseMemory::DPageTables::~DPageTables()
	{
	TRACE2(("DCoarseMemory::DPageTables[0x%08x]::~DPageTables()",this));
	__NK_ASSERT_DEBUG(!iMemory);
	__NK_ASSERT_DEBUG(iMappings.IsEmpty());
	TUint i=0;
	while(i<iNumPageTables)
		{
		TPte* pt = iTables[i];
		if(pt)
			{
			iTables[i] = 0;
			::PageTables.Lock();
			::PageTables.Free(pt);
			::PageTables.Unlock();
			}
		++i;
		}
	}
|

TPte* DCoarseMemory::DPageTables::GetOrAllocatePageTable(TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	// get page table...
	TPte* pt = GetPageTable(aChunkIndex);
	if(!pt)
		pt = AllocatePageTable(aChunkIndex, iMemory->IsDemandPaged());

	return pt;
	}


TPte* DCoarseMemory::DPageTables::GetOrAllocatePageTable(TUint aChunkIndex, TPinArgs& aPinArgs)
	{
	__NK_ASSERT_DEBUG(aPinArgs.iPinnedPageTables);

	if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable))
		return 0;

	TPte* pinnedPt = 0;
	for(;;)
		{
		TPte* pt = GetOrAllocatePageTable(aChunkIndex);

		if(pinnedPt && pinnedPt!=pt)
			{
			// previously pinned page table not needed...
			PageTableAllocator::UnpinPageTable(pinnedPt,aPinArgs);

			// make sure we have memory for next pin attempt...
			MmuLock::Unlock();
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);
			MmuLock::Lock();
			if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable)) // if out of memory...
				{
				// make sure we free any unneeded page table we allocated...
				if(pt)
					FreePageTable(aChunkIndex);
				return 0;
				}
			}

		if(!pt)
			return 0; // out of memory

		if(pt==pinnedPt)
			{
			// we got a page table and it was pinned...
			*aPinArgs.iPinnedPageTables++ = pt;
			++aPinArgs.iNumPinnedPageTables;
			return pt;
			}

		// don't pin page table if it's not paged (e.g. unpaged part of ROM)...
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(!pti->IsDemandPaged())
			return pt;

		// pin the page table...
		pinnedPt = pt;
		PageTableAllocator::PinPageTable(pinnedPt,aPinArgs);
		}
	}
|

TPte* DCoarseMemory::DPageTables::AllocatePageTable(TUint aChunkIndex, TBool aDemandPaged, TBool aPermanent)
	{
	TRACE2(("DCoarseMemory::DPageTables[0x%08x]::AllocatePageTable(0x%08x,%d,%d)",this,aChunkIndex,aDemandPaged,aPermanent));

	TPte* pt;
	do
		{
		// acquire page table lock...
		MmuLock::Unlock();
		::PageTables.Lock();

		// see if we still need to allocate a page table...
		pt = iTables[aChunkIndex];
		if(!pt)
			{
			// allocate page table...
			pt = ::PageTables.Alloc(aDemandPaged);
			if(!pt)
				{
				// out of memory...
				::PageTables.Unlock();
				MmuLock::Lock();
				return 0;
				}
			AssignPageTable(aChunkIndex,pt);
			}

		// release page table lock...
		::PageTables.Unlock();
		MmuLock::Lock();

		// check again...
		pt = iTables[aChunkIndex];
		}
	while(!pt);

	// we have a page table...
	if(aPermanent)
		{
		__NK_ASSERT_ALWAYS(!aDemandPaged);
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		pti->IncPermanenceCount();
		}
	return pt;
	}
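// (AllocatePageTable above: the do/while retry exists because MmuLock has to
// be dropped before the PageTables mutex can be taken - apparently the lock
// order puts the allocator mutex first. While MmuLock is released another
// thread may assign or steal the table, so iTables[aChunkIndex] is re-read
// after re-acquiring MmuLock and the loop repeats until a stable entry is seen.)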
|

void DCoarseMemory::DPageTables::AssignPageTable(TUint aChunkIndex, TPte* aPageTable)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());

	MmuLock::Lock();

	// get physical address of page table now, this can't change whilst we have the page table allocator mutex...
	TPhysAddr ptPhys = Mmu::PageTablePhysAddr(aPageTable);

	// update mappings with new page table...
	TUint offset = aChunkIndex<<(KChunkShift-KPageShift);
	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
	TUint flash = 0;
	while(mapping)
		{
		TUint size = mapping->iSizeInPages;
		TUint start = offset-mapping->iStartIndex; // NB: unsigned; if the mapping starts above 'offset' this wraps and fails the start<size test
		if(start<size && !mapping->BeingDetached())
			{
			// page table is used by this mapping, so set PDE...
			TLinAddr linAddrAndOsAsid = mapping->LinAddrAndOsAsid()+start*KPageSize;
			TPde* pPde = Mmu::PageDirectoryEntry(linAddrAndOsAsid&KPageMask,linAddrAndOsAsid);
			TPde pde = ptPhys|mapping->BlankPde();
#ifdef __USER_MEMORY_GUARDS_ENABLED__
			if (mapping->IsUserMapping())
				pde = PDE_IN_DOMAIN(pde, USER_MEMORY_DOMAIN);
#endif
			TRACE2(("!PDE %x=%x",pPde,pde));
			__NK_ASSERT_DEBUG(((*pPde^pde)&~KPdeMatchMask)==0 || *pPde==KPdeUnallocatedEntry);
			*pPde = pde;
			SinglePdeUpdated(pPde);

			++flash; // increase flash rate because we've done quite a bit more work
			}
		iMappings.Unlock();
		MmuLock::Flash(flash,KMaxMappingsInOneGo);
		iMappings.Lock();
		mapping = (DMemoryMapping*)iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();

	// next, assign page table to us...
	// NOTE: Must happen before MmuLock is released after reaching the end of the mapping list,
	// otherwise it would be possible for a new mapping to be added and mapped before we manage
	// to update iTables with the page table it should use.
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
	pti->SetCoarse(iMemory,aChunkIndex,iPteType);
	__NK_ASSERT_DEBUG(!iTables[aChunkIndex]);
	iTables[aChunkIndex] = aPageTable; // new mappings can now see the page table

	MmuLock::Unlock();
	}
|

void DCoarseMemory::DPageTables::FreePageTable(TUint aChunkIndex)
	{
	TRACE2(("DCoarseMemory::DPageTables[0x%08x]::FreePageTable(0x%08x)",this,aChunkIndex));

	// acquire locks...
	::PageTables.Lock();
	MmuLock::Lock();

	// test if page table still needs freeing...
	TPte* pt = iTables[aChunkIndex];
	if(pt)
		{
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(pti->PageCount()==0 && pti->PermanenceCount()==0)
			{
			// page table needs freeing...
			UnassignPageTable(aChunkIndex);
			MmuLock::Unlock();
			::PageTables.Free(pt);
			::PageTables.Unlock();
			return;
			}
		}

	// page table doesn't need freeing...
	MmuLock::Unlock();
	::PageTables.Unlock();
	return;
	}
|
823 |
|
824 void DCoarseMemory::StealPageTable(TUint aChunkIndex, TUint aPteType) |
|
825 { |
|
826 __NK_ASSERT_DEBUG(PageTablesLockIsHeld()); |
|
827 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
828 __NK_ASSERT_DEBUG(iPageTables[aPteType]); |
|
829 iPageTables[aPteType]->StealPageTable(aChunkIndex); |
|
830 } |
|
831 |
|
832 |
|
833 void DCoarseMemory::DPageTables::StealPageTable(TUint aChunkIndex) |
|
834 { |
|
835 __NK_ASSERT_DEBUG(PageTablesLockIsHeld()); |
|
836 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
837 #ifdef _DEBUG |
|
838 TPte* pt = iTables[aChunkIndex]; |
|
839 __NK_ASSERT_DEBUG(pt); |
|
840 SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt); |
|
841 __NK_ASSERT_DEBUG(pti->PageCount()==0); |
|
842 __NK_ASSERT_DEBUG(pti->PermanenceCount()==0); |
|
843 #endif |
|
844 UnassignPageTable(aChunkIndex); |
|
845 } |
|
846 |
|
847 |
|
848 void DCoarseMemory::DPageTables::UnassignPageTable(TUint aChunkIndex) |
|
849 { |
|
850 __NK_ASSERT_DEBUG(PageTablesLockIsHeld()); |
|
851 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
852 |
|
853 #ifdef _DEBUG |
|
854 TPhysAddr ptPhys = Mmu::PageTablePhysAddr(iTables[aChunkIndex]); |
|
855 #endif |
|
856 |
|
857 // zero page table pointer immediately so new mappings or memory commits will be force to |
|
858 // create a new one (which will block until we've finished here because it also needs the |
|
859 // PageTablesLock... |
|
860 iTables[aChunkIndex] = 0; |
|
861 |
|
862 // remove page table from mappings... |
|
863 TUint offset = aChunkIndex<<(KChunkShift-KPageShift); |
|
864 iMappings.Lock(); |
|
865 TMappingListIter iter; |
|
866 DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings); |
|
867 TUint flash = 0; |
|
868 while(mapping) |
|
869 { |
|
870 __NK_ASSERT_DEBUG(iTables[aChunkIndex]==0); // can't have been recreated because we hold PageTablesLock |
|
871 TUint size = mapping->iSizeInPages; |
|
872 TUint start = offset-mapping->iStartIndex; |
|
873 if(start<size) |
|
874 { |
|
875 // page table is used by this mapping, so clear PDE... |
|
876 TLinAddr linAddrAndOsAsid = mapping->LinAddrAndOsAsid()+start*KPageSize; |
|
877 TPde* pPde = Mmu::PageDirectoryEntry(linAddrAndOsAsid&KPageMask,linAddrAndOsAsid); |
|
878 TPde pde = KPdeUnallocatedEntry; |
|
879 TRACE2(("!PDE %x=%x",pPde,pde)); |
|
880 __NK_ASSERT_DEBUG(*pPde==pde || (*pPde&~KPageTableMask)==ptPhys); |
|
881 *pPde = pde; |
|
882 SinglePdeUpdated(pPde); |
|
883 |
|
884 ++flash; // increase flash rate because we've done quite a bit more work |
|
885 } |
|
886 iMappings.Unlock(); |
|
887 MmuLock::Flash(flash,KMaxMappingsInOneGo); |
|
888 iMappings.Lock(); |
|
889 mapping = (DMemoryMapping*)iter.Next(); |
|
890 } |
|
891 iter.Finish(); |
|
892 |
|
893 iMappings.Unlock(); |
|
894 } |
|

TInt DCoarseMemory::DPageTables::AllocatePermanentPageTables()
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(iMemory));
	__NK_ASSERT_ALWAYS(!iMemory->IsDemandPaged());

	if(iPermanenceCount++)
		{
		// page tables already marked permanent, so end...
		return KErrNone;
		}

	// allocate all page tables...
	MmuLock::Lock();
	TUint flash = 0;
	TUint i;
	for(i=0; i<iNumPageTables; ++i)
		{
		TPte* pt = iTables[i];
		if(pt)
			{
			// already have page table...
			SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
			pti->IncPermanenceCount();
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
			}
		else
			{
			// allocate new page table...
			pt = AllocatePageTable(i,EFalse,ETrue);
			if(!pt)
				{
				MmuLock::Unlock();
				--iPermanenceCount;
				FreePermanentPageTables(0,i);
				return KErrNoMemory;
				}
			}
		}
	MmuLock::Unlock();

	return KErrNone;
	}
|

void DCoarseMemory::DPageTables::FreePermanentPageTables(TUint aChunkIndex, TUint aChunkCount)
	{
	MmuLock::Lock();

	TUint flash = 0;
	TUint i;
	for(i=aChunkIndex; i<aChunkIndex+aChunkCount; ++i)
		{
		TPte* pt = iTables[i];
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(pti->DecPermanenceCount() || pti->PageCount())
			{
			// still in use...
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
			}
		else
			{
			// page table no longer used for anything...
			MmuLock::Unlock();
			FreePageTable(i);
			MmuLock::Lock();
			}
		}

	MmuLock::Unlock();
	}


void DCoarseMemory::DPageTables::FreePermanentPageTables()
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(iMemory));

	if(--iPermanenceCount)
		{
		// page tables still permanent, so end...
		return;
		}

	FreePermanentPageTables(0,iNumPageTables);
	}
|

TInt DCoarseMemory::DPageTables::AddMapping(DCoarseMapping* aMapping)
	{
	TRACE(("DCoarseMemory::DPageTables[0x%08x]::AddMapping(0x%08x)",this,aMapping));
	__NK_ASSERT_DEBUG(aMapping->IsCoarse());
	Open();
	MmuLock::Lock();
	iMappings.Lock();
	aMapping->LinkToMemory(iMemory,iMappings);
	iMappings.Unlock();
	MmuLock::Unlock();
	return KErrNone;
	}


void DCoarseMemory::DPageTables::RemoveMapping(DCoarseMapping* aMapping)
	{
	aMapping->UnlinkFromMemory(iMappings);
	Close();
	}
|

void DCoarseMemory::DPageTables::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TUint pteIndex = aIndex & (KChunkMask>>KPageShift);

	// get address of page table...
	MmuLock::Lock();
	TUint i = aIndex>>(KChunkShift-KPageShift);
	TPte* pPte = GetPageTable(i);

	if (!pPte)
		{
		// This page has been unmapped so just return.
		MmuLock::Unlock();
		return;
		}

	// remap the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArray, iBlankPte);

	MmuLock::Unlock();

	if (aInvalidateTLB)
		FlushTLB(aIndex, aIndex + 1);
	}
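// (Index arithmetic above, illustrated for the usual ARM configuration of 4KB
// pages and 1MB chunks: for aIndex==0x305, i = 0x305>>8 = 3 picks the page
// table covering the fourth chunk, and pteIndex = 0x305&0xFF = 5 picks the
// sixth entry within it. The same split recurs in MapPages/UnmapPages below.)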
|

TInt DCoarseMemory::DPageTables::MapPages(RPageArray::TIter aPages)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of page table...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte = GetOrAllocatePageTable(i);

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			return KErrNoMemory;
			}

		// map some pages...
		pPte += pteIndex;
		TBool keepPt = Mmu::MapPages(pPte, n, pages, iBlankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(i);

		// move on...
		aPages.Skip(n);
		}

	return KErrNone;
	}
|

void DCoarseMemory::DPageTables::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	TUint startIndex = aPages.Index();

	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of PTE for pages...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte = iTables[i];
		if(pPte)
			{
			// unmap some pages...
			pPte += pteIndex;
			TBool keepPt = Mmu::UnmapPages(pPte,n,pages);
			MmuLock::Unlock();

			// free page table if no longer needed...
			if(!keepPt)
				FreePageTable(i);
			}
		else
			{
			// no page table found...
			MmuLock::Unlock();
			}

		// move on...
		aPages.Skip(n);
		}

	FlushTLB(startIndex,aPages.IndexEnd());
	}
|

void DCoarseMemory::DPageTables::RestrictPagesNA(RPageArray::TIter aPages)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	TUint startIndex = aPages.Index();

	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of PTE for pages...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte = iTables[i];
		if(pPte)
			{
			// restrict some pages...
			pPte += pteIndex;
			Mmu::RestrictPagesNA(pPte,n,pages);
			}
		MmuLock::Unlock();

		// move on...
		aPages.Skip(n);
		}

	FlushTLB(startIndex,aPages.IndexEnd());
	}
|

TInt DCoarseMemory::DPageTables::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs,
										DMemoryMappingBase* aMapping, TUint aMapInstanceCount)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	TBool pinPageTable = aPinArgs.iPinnedPageTables!=0; // check if we need to pin the first page table
	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);
		if(pteIndex==0)
			pinPageTable = aPinArgs.iPinnedPageTables!=0; // started a new page table, check if we need to pin it

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// make sure we have memory to pin the page table if required...
		if(pinPageTable)
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);

		// get address of page table...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte;
		if(pinPageTable)
			pPte = GetOrAllocatePageTable(i,aPinArgs);
		else
			pPte = GetOrAllocatePageTable(i);

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			return KErrNoMemory;
			}

		if (aMapInstanceCount != aMapping->MapInstanceCount())
			{
			// The mapping that took the page fault has been reused.
			MmuLock::Unlock();
			FreePageTable(i); // This will only free if this is the only pt referencer.
			return KErrNotFound;
			}

		// map some pages...
		pPte += pteIndex;
		TPte blankPte = iBlankPte;
		if(aPinArgs.iReadOnly)
			blankPte = Mmu::MakePteInaccessible(blankPte,true);
		TBool keepPt = Mmu::PageInPages(pPte, n, pages, blankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(i);

		// move on...
		aPages.Skip(n);
		pinPageTable = false;
		}

	return KErrNone;
	}
|

TBool DCoarseMemory::DPageTables::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	TUint pteIndex = aIndex & (KChunkMask >> KPageShift);

	// get address of page table...
	TUint i = aIndex >> (KChunkShift - KPageShift);
	TPte* pPte = GetPageTable(i);

	// check the page is still mapped...
	if (!pPte)
		return EFalse;

	// map the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArrayPtr, iBlankPte);
	return ETrue;
	}
|

void DCoarseMemory::DPageTables::FlushTLB(TUint aStartIndex, TUint aEndIndex)
	{
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
	while(mapping)
		{
		// get region which overlaps the mapping...
		TUint start = mapping->iStartIndex;
		TUint end = start+mapping->iSizeInPages;
		if(start<aStartIndex)
			start = aStartIndex;
		if(end>aEndIndex)
			end = aEndIndex;
		if(start>=end)
			{
			// the mapping doesn't contain the pages...
			iMappings.Unlock();
			}
		else
			{
			// flush TLB for pages in the mapping...
			TUint size = end-start;
			start -= mapping->iStartIndex;
			TLinAddr addr = mapping->LinAddrAndOsAsid()+start*KPageSize;
			TLinAddr endAddr = addr+size*KPageSize;
			iMappings.Unlock();
			do
				{
				InvalidateTLBForPage(addr);
				}
			while((addr+=KPageSize)<endAddr);
			}
		iMappings.Lock();
		mapping = (DMemoryMapping*)iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();
#endif
	}
|


//
// DCoarseMemory
//

DCoarseMemory::DCoarseMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: DMemoryObject(aManager,ECoarseObject,aSizeInPages,aAttributes,aCreateFlags)
	{
	}


DCoarseMemory* DCoarseMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	DCoarseMemory* self = new DCoarseMemory(aManager, aSizeInPages, aAttributes, aCreateFlags);
	if(self)
		{
		if(self->Construct()==KErrNone)
			return self;
		self->Close();
		}
	return 0;
	}


DCoarseMemory::~DCoarseMemory()
	{
	TRACE2(("DCoarseMemory[0x%08x]::~DCoarseMemory()",this));
#ifdef _DEBUG
	for(TUint i=0; i<ENumPteTypes; i++)
		{
		__NK_ASSERT_DEBUG(!iPageTables[i]);
		}
#endif
	}
|

DMemoryMapping* DCoarseMemory::CreateMapping(TUint aIndex, TUint aCount)
	{
	if (((aIndex|aCount)&(KChunkMask>>KPageShift))==0)
		return new DCoarseMapping();
	else
		return new DFineMapping();
	}
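// (CreateMapping above: a region is given a coarse mapping - one that shares
// this object's page tables - only when both its start index and page count
// are chunk-aligned; anything else falls back to a fine mapping with its own
// page tables.)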
|

TInt DCoarseMemory::MapPages(RPageArray::TIter aPages)
	{
	TRACE2(("DCoarseMemory[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

	// map pages in all page tables for coarse mapping...
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			tables->Open();
			MmuLock::Unlock();
			TInt r = tables->MapPages(aPages);
			tables->AsyncClose();
			if(r!=KErrNone)
				return r;
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// map pages in all fine mappings...
	return DMemoryObject::MapPages(aPages);
	}
|

void DCoarseMemory::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TRACE2(("DCoarseMemory[0x%08x]::RemapPage() index=0x%x",this, aIndex));

	// remap pages in all page tables for coarse mapping...
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			tables->Open();
			MmuLock::Unlock();
			tables->RemapPage(aPageArray, aIndex, aInvalidateTLB);
			tables->AsyncClose();
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// remap page in all fine mappings...
	DMemoryObject::RemapPage(aPageArray, aIndex, aInvalidateTLB);
	}
|

void DCoarseMemory::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	TRACE2(("DCoarseMemory[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));

	if(!aPages.Count())
		return;

	// unmap pages from all page tables for coarse mapping...
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			tables->Open();
			MmuLock::Unlock();
			tables->UnmapPages(aPages,aDecommitting);
			tables->AsyncClose();
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// unmap pages from all fine mappings...
	DMemoryObject::UnmapPages(aPages,aDecommitting);
	}
|

void DCoarseMemory::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
	{
	TRACE2(("DCoarseMemory[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));
	__ASSERT_COMPILE(ERestrictPagesForMovingFlag != ERestrictPagesNoAccessForMoving);

	if(!aPages.Count())
		return;

	if (aRestriction != ERestrictPagesForMovingFlag)
		{
		// restrict pages in all the page tables for the coarse mapping...
		MmuLock::Lock();
		TUint pteType = 0;
		do
			{
			DPageTables* tables = iPageTables[pteType];
			if(tables)
				{
				tables->Open();
				MmuLock::Unlock();
				tables->RestrictPagesNA(aPages);
				tables->AsyncClose();
				MmuLock::Lock();
				}
			}
		while(++pteType<ENumPteTypes);
		MmuLock::Unlock();
		}

	// restrict pages in all fine mappings, will also check for pinned mappings...
	DMemoryObject::RestrictPages(aPages,aRestriction);
	}
|

TPte* DCoarseMemory::GetPageTable(TUint aPteType, TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(aChunkIndex < (iSizeInPages >> KPagesInPDEShift));
	return iPageTables[aPteType]->GetPageTable(aChunkIndex);
	}
|

TInt DCoarseMemory::PageIn(DCoarseMapping* aMapping, RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	DPageTables* tables = iPageTables[aMapping->PteType()];
	tables->Open();

	MmuLock::Unlock();

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = aMapping->Base()+(aPages.Index()-aMapping->iStartIndex)*KPageSize;
	TLinAddr endAddr = startAddr+aPages.Count()*KPageSize;
#endif

	TInt r = tables->PageIn(aPages, aPinArgs, aMapping, aMapInstanceCount);

	// clean TLB...
#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	InvalidateTLBForAsid(aMapping->OsAsid());
#else
	TLinAddr addr = startAddr+aMapping->OsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif

	tables->AsyncClose();

	return r;
	}
|

TBool DCoarseMemory::MovingPageIn(DCoarseMapping* aMapping, TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	DCoarseMemory::DPageTables* tables = iPageTables[aMapping->PteType()];
	return tables->MovingPageIn(aPageArrayPtr, aIndex);
	}


TPte* DCoarseMemory::FindPageTable(DCoarseMapping* aMapping, TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	DCoarseMemory::DPageTables* tables = iPageTables[aMapping->PteType()];

	// get address of page table...
	TUint i = aMemoryIndex >> (KChunkShift - KPageShift);
	return tables->GetPageTable(i);
	}
|

TInt DCoarseMemory::ClaimInitialPages(TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages)
	{
	TRACE(("DCoarseMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));

	// validate arguments...
	if(aBase&KChunkMask || aBase<KGlobalMemoryBase)
		return KErrArgument;
	if(aSize&KPageMask || aSize>iSizeInPages*KPageSize)
		return KErrArgument;

	// get DPageTables object...
	TUint pteType = Mmu::PteType(aPermissions,true);
	MemoryObjectLock::Lock(this);
	DPageTables* tables = GetOrAllocatePageTables(pteType);
	MemoryObjectLock::Unlock(this);
	__NK_ASSERT_DEBUG(tables);

	// check and allocate page array entries...
	RPageArray::TIter pageIter;
	TInt r = iPages.AddStart(0,aSize>>KPageShift,pageIter);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	// hold MmuLock for a long time; shouldn't matter as this is only done during boot...
	::PageTables.Lock();
	MmuLock::Lock();

	TPte blankPte = tables->iBlankPte;
	TPte** pPt = tables->iTables;
	TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);
	TUint offset = 0;
	TUint size = aSize;
	while(size)
		{
		TPde pde = *pPde;
		TRACE(("DCoarseMemory::ClaimInitialPages: %08x: 0x%08x",aBase+offset,pde));

		TPte* pPte = NULL;
		SPageTableInfo* pti = NULL;

		if (Mmu::PdeMapsSection(pde))
			{
			TPhysAddr sectionBase = Mmu::SectionBaseFromPde(pde);
			TRACE((" chunk is section mapped, base at %08x", sectionBase));
			__NK_ASSERT_DEBUG(sectionBase != KPhysAddrInvalid);

			TPde pde = sectionBase | Mmu::BlankSectionPde(Attributes(),pteType);
			__NK_ASSERT_DEBUG(((*pPde^pde)&~KPdeMatchMask)==0);
			*pPde = pde;
			SinglePdeUpdated(pPde);
			InvalidateTLB();

			// We allocate and populate a page table for the section even though it won't be mapped
			// initially because the presence of the page table is used to check whether RAM is
			// mapped in a chunk, and because it makes it possible to break the section mapping
			// without allocating memory. This may change in the future.

			// Note these page tables are always unpaged here regardless of the paged bit in iFlags
			// (e.g. a ROM object is marked as paged despite its initial pages being unpaged).
			pPte = tables->AllocatePageTable(offset >> KChunkShift, EFalse, EFalse);
			if (!pPte)
				{
				MmuLock::Unlock();
				return KErrNoMemory;
				}
			pti = SPageTableInfo::FromPtPtr(pPte);
			}
		else if (Mmu::PdeMapsPageTable(pde))
			{
			pPte = Mmu::PageTableFromPde(*pPde);
			TRACE((" page table found at %08x", pPte));
			__NK_ASSERT_DEBUG(pPte);
			pti = SPageTableInfo::FromPtPtr(pPte);
			pti->SetCoarse(this,offset>>KChunkShift,pteType);
			}

		*pPt++ = pPte;
		++pPde;

		TUint numPages = 0;
		do
			{
			TPhysAddr pagePhys = Mmu::LinearToPhysical(aBase+offset);
			TPte pte;
			if(pagePhys==KPhysAddrInvalid)
				{
				if(size)
					{
					__NK_ASSERT_ALWAYS(aAllowGaps); // we have a gap, check this is allowed
					pageIter.Skip(1);
					}

				pte = KPteUnallocatedEntry;
				}
			else
				{
				__NK_ASSERT_ALWAYS(size); // pages can't be mapped above aSize

				pageIter.Add(1,&pagePhys);

				SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
				__NK_ASSERT_ALWAYS(pi || aAllowNonRamPages);
				if(pi)
					{
					__NK_ASSERT_ALWAYS(pi->Type()==SPageInfo::EFixed);
					pi->SetManaged(this,offset>>KPageShift,PageInfoFlags());
					}

				++numPages;
				pte = pagePhys|blankPte;
				}

			if(pPte)
				{
				TRACE2(("!PTE %x=%x (was %x)",pPte,pte,*pPte));
				__NK_ASSERT_DEBUG(((*pPte^pte)&~KPteMatchMask)==0 || *pPte==KPteUnallocatedEntry);
				*pPte = pte;
				CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
				++pPte;
				}

			offset += KPageSize;
			if(size)
				size -= KPageSize;
			}
		while(offset&(KChunkMask&~KPageMask));

		if(pti)
			{
			pti->IncPageCount(numPages);
			TRACE2(("pt %x page count=%d",TLinAddr(pPte)-KPageTableSize,numPages));
			__NK_ASSERT_DEBUG(pti->CheckPageCount());
			}
		}

	InvalidateTLBForAsid(KKernelOsAsid);

	MmuLock::Unlock();
	::PageTables.Unlock();

	// release page array entries...
	iPages.AddEnd(0,aSize>>KPageShift);

	return KErrNone;
	}
|

DCoarseMemory::DPageTables* DCoarseMemory::GetOrAllocatePageTables(TUint aPteType)
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

	MmuLock::Lock();
	DPageTables* tables = iPageTables[aPteType];
	if(tables)
		tables->Open();
	MmuLock::Unlock();

	if(!tables)
		{
		// allocate a new one if required...
		tables = DPageTables::New(this, iSizeInPages, aPteType);
		if (tables)
			{
			__NK_ASSERT_DEBUG(!iPageTables[aPteType]);
			iPageTables[aPteType] = tables;
			}
		}

	return tables;
	}
|

TInt DCoarseMemory::AddMapping(DMemoryMappingBase* aMapping)
	{
	if(!aMapping->IsCoarse())
		{
		// not a coarse mapping...
		return DMemoryObject::AddMapping(aMapping);
		}

	__NK_ASSERT_DEBUG(aMapping->IsPinned()==false); // coarse mappings can't pin

	// Check mapping allowed. Must hold memory object lock to prevent changes
	// to the object's restrictions.
	MemoryObjectLock::Lock(this);
	TInt r = CheckNewMapping(aMapping);
	if(r!=KErrNone)
		{
		MemoryObjectLock::Unlock(this);
		return r;
		}

	// get DPageTables for mapping...
	DPageTables* tables = GetOrAllocatePageTables(aMapping->PteType());

	// Safe to release here as no restrictions on this type of mapping can be
	// added while we have an iPageTables entry for it.
	MemoryObjectLock::Unlock(this);
	if(!tables)
		return KErrNoMemory;

	// add mapping to DPageTables...
	r = tables->AddMapping((DCoarseMapping*)aMapping);
	if(r==KErrNone)
		{
		// allocate permanent page tables if required...
		if(aMapping->Flags()&DMemoryMapping::EPermanentPageTables)
			{
			MemoryObjectLock::Lock(this);
			r = tables->AllocatePermanentPageTables();
			MemoryObjectLock::Unlock(this);

			if(r==KErrNone)
				__e32_atomic_ior_ord8(&aMapping->Flags(), (TUint8)DMemoryMapping::EPageTablesAllocated);
			else
				tables->RemoveMapping((DCoarseMapping*)aMapping);
			}
		}

	tables->Close();

	return r;
	}
|

void DCoarseMemory::RemoveMapping(DMemoryMappingBase* aMapping)
	{
	if(!aMapping->IsCoarse())
		{
		// not a coarse mapping...
		DMemoryObject::RemoveMapping(aMapping);
		return;
		}

	// need a temporary reference on self because we may be removing the last mapping,
	// which would delete this...
	Open();

	// get the DPageTables the mapping is attached to...
	DPageTables* tables = iPageTables[aMapping->PteType()];
	__NK_ASSERT_DEBUG(tables); // must exist because aMapping has a reference on it

	// free permanent page tables if required...
	if(aMapping->Flags()&DMemoryMapping::EPageTablesAllocated)
		{
		MemoryObjectLock::Lock(this);
		tables->FreePermanentPageTables();
		MemoryObjectLock::Unlock(this);
		}

	// remove mapping from page tables object...
	tables->RemoveMapping((DCoarseMapping*)aMapping);

	Close(); // may delete this memory object
	}
|

TInt DCoarseMemory::SetReadOnly()
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

	// Search for writable iPageTables entries.
	// We hold the MemoryObjectLock so iPageTables entries can't be added or removed.
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		if((pteType & EPteTypeWritable) && iPageTables[pteType])
			{
			MmuLock::Unlock();
			return KErrInUse;
			}
		}
	while(++pteType < ENumPteTypes);
	MmuLock::Unlock();

	// check all fine mappings and mark the object read only...
	return DMemoryObject::SetReadOnly();
	}
|


//
// DFineMemory
//

DFineMemory::DFineMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: DMemoryObject(aManager,0,aSizeInPages,aAttributes,aCreateFlags)
	{
	}


DFineMemory* DFineMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	DFineMemory* self = new DFineMemory(aManager,aSizeInPages,aAttributes,aCreateFlags);
	if(self)
		{
		if(self->Construct()==KErrNone)
			return self;
		self->Close();
		}
	return 0;
	}


DFineMemory::~DFineMemory()
	{
	TRACE2(("DFineMemory[0x%08x]::~DFineMemory",this));
	}
|

TInt DFineMemory::ClaimInitialPages(TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages)
	{
	TRACE(("DFineMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));
	(void)aPermissions;

	// validate arguments...
	if(aBase&KPageMask || aBase<KGlobalMemoryBase)
		return KErrArgument;
	if(aSize&KPageMask || aSize>iSizeInPages*KPageSize)
		return KErrArgument;

#ifdef _DEBUG
	// calculate 'blankPte', the correct PTE value for pages in this memory object...
	TUint pteType = Mmu::PteType(aPermissions,true);
	TPte blankPte = Mmu::BlankPte(Attributes(),pteType);
#endif

	// get page table...
	TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);
	TPte* pPte = Mmu::PageTableFromPde(*pPde);
	if(!pPte)
		return KErrNone; // no pages mapped

	// check and allocate page array entries...
	RPageArray::TIter pageIter;
	TInt r = iPages.AddStart(0,aSize>>KPageShift,pageIter);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	// hold MmuLock for a long time; shouldn't matter as this is only done during boot...
	MmuLock::Lock();

	// setup page table for fine mappings...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
	__NK_ASSERT_DEBUG(pti->CheckPageCount());
	TBool pageTableOk = pti->ClaimFine(aBase&~KChunkMask,KKernelOsAsid);
	__NK_ASSERT_ALWAYS(pageTableOk);
	TRACE(("DFineMemory::ClaimInitialPages page table = 0x%08x",pPte));

	TUint pteIndex = (aBase>>KPageShift)&(KChunkMask>>KPageShift);
	TUint pageIndex = 0;
	TUint size = aSize;
	while(pageIndex<iSizeInPages)
		{
		TPhysAddr pagePhys = Mmu::PtePhysAddr(pPte[pteIndex],pteIndex);
		if(pagePhys==KPhysAddrInvalid)
			{
			if(size)
				{
				__NK_ASSERT_ALWAYS(aAllowGaps); // we have a gap, check this is allowed
				pageIter.Skip(1);
				}

			// check PTE is correct...
			__NK_ASSERT_DEBUG(pPte[pteIndex]==KPteUnallocatedEntry);
			}
		else
			{
			__NK_ASSERT_ALWAYS(size); // pages can't be mapped above aSize

			pageIter.Add(1,&pagePhys);

			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);

			if(!pi)
				__NK_ASSERT_ALWAYS(aAllowNonRamPages);
			else
				{
				__NK_ASSERT_ALWAYS(pi->Type()==SPageInfo::EFixed);
				pi->SetManaged(this,pageIndex,PageInfoFlags());
				}

#ifdef _DEBUG
			// check PTE is correct...
			TPte pte = pagePhys|blankPte;
			__NK_ASSERT_DEBUG(((pPte[pteIndex]^pte)&~KPteMatchMask)==0);
#endif
			}

		// move on to next page...
		++pteIndex;
		__NK_ASSERT_ALWAYS(pteIndex<(KChunkSize>>KPageShift));
		++pageIndex;
		if(size)
			size -= KPageSize;
		}

	MmuLock::Unlock();

	// release page array entries...
	iPages.AddEnd(0,aSize>>KPageShift);

	return KErrNone;
	}