author       Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
date         Thu, 19 Aug 2010 11:14:22 +0300
branch       RCL_3
changeset    42:a179b74831c9
parent       41:0ffb4e86fcc9
permissions  -rw-r--r--

// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "mm.h"
#include "mmu.h"

#include "mobject.h"
#include "mmapping.h"
#include "mptalloc.h"
#include "mmanager.h"
#include "cache_maintenance.inl"

const TUint KMaxMappingsInOneGo = KMaxPageInfoUpdatesInOneGo; // must be power-of-2

//
// MemoryObjectLock
//

/**
The mutex pool used to assign locks to memory objects.
@see #MemoryObjectLock.
*/
DMutexPool MemoryObjectMutexPool;

void MemoryObjectLock::Lock(DMemoryObject* aMemory)
	{
	TRACE2(("MemoryObjectLock::Lock(0x%08x) try",aMemory));
	MemoryObjectMutexPool.Wait(aMemory->iLock);
	TRACE2(("MemoryObjectLock::Lock(0x%08x) acquired",aMemory));
	}

void MemoryObjectLock::Unlock(DMemoryObject* aMemory)
	{
	TRACE2(("MemoryObjectLock::Unlock(0x%08x)",aMemory));
	MemoryObjectMutexPool.Signal(aMemory->iLock);
	}

TBool MemoryObjectLock::IsHeld(DMemoryObject* aMemory)
	{
	return MemoryObjectMutexPool.IsHeld(aMemory->iLock);
	}

//
// DMemoryObject
//

DMemoryObject::DMemoryObject(DMemoryManager* aManager, TUint aFlags, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: iManager(aManager), iFlags(aFlags), iAttributes(Mmu::CanonicalMemoryAttributes(aAttributes)),
	  iSizeInPages(aSizeInPages)
	{
	__ASSERT_COMPILE(EMemoryAttributeMask<0x100); // make sure aAttributes fits into a TUint8

	TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);
	iRamAllocFlags = type;
	if(aCreateFlags&EMemoryCreateNoWipe)
		iRamAllocFlags |= Mmu::EAllocNoWipe;
	else if(aCreateFlags&EMemoryCreateUseCustomWipeByte)
		{
		TUint8 wipeByte = (aCreateFlags>>EMemoryCreateWipeByteShift)&0xff;
		iRamAllocFlags |= wipeByte<<Mmu::EAllocWipeByteShift;
		iRamAllocFlags |= Mmu::EAllocUseCustomWipeByte;
		}

	if(aCreateFlags&EMemoryCreateDemandPaged)
		{
		iFlags |= EDemandPaged;
		iRamAllocFlags |= Mmu::EAllocNoPagerReclaim;
		}
	if(aCreateFlags&EMemoryCreateReserveAllResources)
		iFlags |= EReserveResources;
	if(aCreateFlags&EMemoryCreateDisallowPinning)
		iFlags |= EDenyPinning;
	if(aCreateFlags&EMemoryCreateReadOnly)
		iFlags |= EDenyWriteMappings;
	if(!(aCreateFlags&EMemoryCreateAllowExecution))
		iFlags |= EDenyExecuteMappings;
	}

TInt DMemoryObject::Construct()
	{
	TBool preAllocateMemory = iFlags&(EReserveResources|EDemandPaged);
	TInt r = iPages.Construct(iSizeInPages,preAllocateMemory);
	return r;
	}

DMemoryObject::~DMemoryObject()
	{
	TRACE(("DMemoryObject[0x%08x]::~DMemoryObject()",this));
	__NK_ASSERT_DEBUG(iMappings.IsEmpty());
	}

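/**
Check that the region [aIndex,aIndex+aCount) lies within this memory object.
Returns false if aIndex+aCount overflows or extends beyond #iSizeInPages.
*/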
TBool DMemoryObject::CheckRegion(TUint aIndex, TUint aCount)
	{
	TUint end = aIndex+aCount;
	return end>=aIndex && end<=iSizeInPages;
	}

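/**
Clip the region [aIndex,aIndex+aCount) so that it lies within this memory
object, clamping to the maximum TUint if aIndex+aCount overflows.
*/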
void DMemoryObject::ClipRegion(TUint& aIndex, TUint& aCount)
	{
	TUint end = aIndex+aCount;
	if(end<aIndex) // overflow?
		end = ~0u;
	if(end>iSizeInPages)
		end = iSizeInPages;
	if(aIndex>=end)
		aIndex = end;
	aCount = end-aIndex;
	}

void DMemoryObject::SetLock(DMutex* aLock)
	{
	__NK_ASSERT_DEBUG(!iLock);
	iLock = aLock;
	TRACE(("MemoryObject[0x%08x]::SetLock(0x%08x) \"%O\"",this,aLock,aLock));
	}

DMemoryMapping* DMemoryObject::CreateMapping(TUint, TUint)
	{
	return new DFineMapping();
	}

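/**
Apply the page array entries in aPages to every mapping attached to this
memory object. Pinned mappings are skipped because their pages never change.
The mapping list lock is released around each call to a mapping's MapPages,
so each mapping is opened (and later asynchronously closed) to keep it alive.
*/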
TInt DMemoryObject::MapPages(RPageArray::TIter aPages)
	{
	TRACE2(("DMemoryObject[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

	TUint offset = aPages.Index();
	TUint offsetEnd = aPages.IndexEnd();
	TInt r = KErrNone;

	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		if(mapping->IsPinned())
			{
			// pinned mappings don't change, so nothing to do...
			iMappings.Unlock();
			}
		else
			{
			// get region where pages overlap the mapping...
			TUint start = mapping->iStartIndex;
			TUint end = start+mapping->iSizeInPages;
			if(start<offset)
				start = offset;
			if(end>offsetEnd)
				end = offsetEnd;
			if(start>=end)
				{
				// the mapping doesn't contain the pages...
				iMappings.Unlock();
				}
			else
				{
				// map pages in the mapping...
				mapping->Open();
				TUint mapInstanceCount = mapping->MapInstanceCount();
				iMappings.Unlock();
				r = mapping->MapPages(aPages.Slice(start,end),mapInstanceCount);
				mapping->AsyncClose();
				if(r!=KErrNone)
					{
					iMappings.Lock();
					break;
					}
				}
			}
		iMappings.Lock();
		mapping = iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();

	return r;
	}

void DMemoryObject::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TRACE2(("DMemoryObject[0x%08x]::RemapPage(0x%x,%d,%d)",this,aPageArray,aIndex,aInvalidateTLB));

	iMappings.RemapPage(aPageArray, aIndex, aInvalidateTLB);

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	if (aInvalidateTLB)
		InvalidateTLB();
#endif
	}

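/**
Remove the page array entries in aPages from every mapping attached to this
memory object. Pinned mappings veto the unmap: when decommitting they are
flagged with EPageUnmapVetoed and the page array entries are restored via
RPageArray::TIter::VetoUnmap.
*/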
void DMemoryObject::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	TRACE2(("DMemoryObject[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));

	TUint offset = aPages.Index();
	TUint offsetEnd = aPages.IndexEnd();
	if(offset==offsetEnd)
		return;

	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		// get region where pages overlap the mapping...
		TUint start = mapping->iStartIndex;
		TUint end = start+mapping->iSizeInPages;
		if(start<offset)
			start = offset;
		if(end>offsetEnd)
			end = offsetEnd;
		if(start>=end)
			{
			// the mapping doesn't contain the pages...
			iMappings.Unlock();
			}
		else
			{
			RPageArray::TIter pages = aPages.Slice(start,end);
			if(mapping->IsPinned())
				{
				// pinned mappings veto page unmapping...
				if(aDecommitting)
					__e32_atomic_ior_ord8(&mapping->Flags(), (TUint8)DMemoryMapping::EPageUnmapVetoed);
				iMappings.Unlock();
				TRACE2(("DFineMemoryMapping[0x%08x] veto UnmapPages, index=0x%x count=0x%x",mapping,pages.Index(),pages.Count()));
				pages.VetoUnmap();
				}
			else
				{
				// unmap pages in the mapping...
				mapping->Open();
				TUint mapInstanceCount = mapping->MapInstanceCount();
				iMappings.Unlock();
				mapping->UnmapPages(pages,mapInstanceCount);
				mapping->AsyncClose();
				}
			}
		iMappings.Lock();
		mapping = iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	InvalidateTLB();
#endif
	}

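/**
Restrict access to the page array entries in aPages for every attached
mapping. Physically pinned mappings always veto the restriction; other
pinned mappings veto it unless the operation is only a check for page
moving (ERestrictPagesForMovingFlag), in which case nothing is restricted.
*/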
void DMemoryObject::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
	{
	TRACE2(("DMemoryObject[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));

	TUint offset = aPages.Index();
	TUint offsetEnd = aPages.IndexEnd();
	if(offset==offsetEnd)
		return;

	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		// get region where pages overlap the mapping...
		TUint start = mapping->iStartIndex;
		TUint end = start+mapping->iSizeInPages;
		if(start<offset)
			start = offset;
		if(end>offsetEnd)
			end = offsetEnd;
		if(start>=end)
			{
			// the mapping doesn't contain the pages...
			iMappings.Unlock();
			}
		else
			{
			RPageArray::TIter pages = aPages.Slice(start,end);
			if(mapping->IsPhysicalPinning() ||
				(!(aRestriction & ERestrictPagesForMovingFlag) && mapping->IsPinned()))
				{
				// Pinned mappings veto page restrictions except for page moving
				// where only physically pinned mappings block page moving.
				iMappings.Unlock();
				TRACE2(("DFineMemoryMapping[0x%08x] veto RestrictPages, index=0x%x count=0x%x",mapping,pages.Index(),pages.Count()));
				pages.VetoRestrict(aRestriction & ERestrictPagesForMovingFlag);
				// Mappings lock required for iter.Finish() as iter will be removed from the mappings list.
				iMappings.Lock();
				break;
				}
			else
				{
				// pages not pinned so do they need restricting...
				if(aRestriction == ERestrictPagesForMovingFlag)
					{
					// nothing to do when just checking for pinned mappings for
					// page moving purposes and not restricting to NA.
					iMappings.Unlock();
					}
				else
					{
					// restrict pages in the mapping...
					mapping->Open();
					TUint mapInstanceCount = mapping->MapInstanceCount();
					iMappings.Unlock();
					mapping->RestrictPagesNA(pages, mapInstanceCount);
					mapping->AsyncClose();
					}
				}
			}
		iMappings.Lock();
		mapping = iter.Next();
		}

	if(aRestriction & ERestrictPagesForMovingFlag)
		{// Clear the mappings added flag so page moving can detect whether any
		 // new mappings have been added.
		ClearMappingAddedFlag();
		}

	iter.Finish();
	iMappings.Unlock();

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	// Writable memory objects will have been restricted no access so invalidate TLB.
	if (aRestriction != ERestrictPagesForMovingFlag)
		InvalidateTLB();
#endif
	}

TInt DMemoryObject::CheckNewMapping(DMemoryMappingBase* aMapping)
	{
	if(iFlags&EDenyPinning && aMapping->IsPinned())
		return KErrAccessDenied;
	if(iFlags&EDenyMappings)
		return KErrAccessDenied;
	if(iFlags&EDenyWriteMappings && !aMapping->IsReadOnly())
		return KErrAccessDenied;
#ifdef MMU_SUPPORTS_EXECUTE_NEVER
	if((iFlags&EDenyExecuteMappings) && aMapping->IsExecutable())
		return KErrAccessDenied;
#endif
	return KErrNone;
	}

TInt DMemoryObject::AddMapping(DMemoryMappingBase* aMapping)
	{
	__NK_ASSERT_DEBUG(!aMapping->IsCoarse());

	// check mapping allowed...
	MmuLock::Lock();
	iMappings.Lock();

	TInt r = CheckNewMapping(aMapping);
	if(r == KErrNone)
		{
		Open();
		aMapping->LinkToMemory(this, iMappings);
		}

	iMappings.Unlock();
	MmuLock::Unlock();

	TRACE(("DMemoryObject[0x%08x]::AddMapping(0x%08x) returns %d", this, aMapping, r));

	return r;
	}

void DMemoryObject::RemoveMapping(DMemoryMappingBase* aMapping)
	{
	aMapping->UnlinkFromMemory(iMappings);
	Close();
	}

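/**
Mark this memory object read-only. Fails with KErrInUse if any attached
mapping is writable; otherwise sets EDenyWriteMappings so that no writable
mapping can be added afterwards.
*/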
TInt DMemoryObject::SetReadOnly()
	{
	TRACE(("DMemoryObject[0x%08x]::SetReadOnly()",this));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

	TInt r = KErrNone;
	iMappings.Lock();
	if (iFlags & EDenyWriteMappings)
		{// The object is already read only.
		iMappings.Unlock();
		return KErrNone;
		}

	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		if (!mapping->IsReadOnly())
			{
			r = KErrInUse;
			goto exit;
			}
		// This will flash iMappings.Lock to stop it being held too long.
		// This is safe as new mappings will be added to the end of the list so we
		// won't miss them.
		mapping = iter.Next();
		}
	// Block any writable mapping from being added to this memory object.
	// Use atomic operation as iMappings.Lock protects EDenyWriteMappings
	// but not the whole word.
	__e32_atomic_ior_ord8(&iFlags, (TUint8)EDenyWriteMappings);

exit:
	iter.Finish();
	iMappings.Unlock();
	return r;
	}

void DMemoryObject::DenyMappings()
	{
	TRACE(("DMemoryObject[0x%08x]::LockMappings()",this));
	MmuLock::Lock();
	// Use atomic operation as MmuLock protects EDenyMappings
	// but not the whole word.
	__e32_atomic_ior_ord8(&iFlags, (TUint8)EDenyMappings);
	MmuLock::Unlock();
	}

TInt DMemoryObject::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
	{
	TRACE2(("DMemoryObject[0x%08x]::PhysAddr(0x%x,0x%x,?,?)",this,aIndex,aCount));
	TInt r = iPages.PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
	TRACE2(("DMemoryObject[0x%08x]::PhysAddr(0x%x,0x%x,?,?) returns %d aPhysicalAddress=0x%08x",this,aIndex,aCount,r,aPhysicalAddress));
	return r;
	}

void DMemoryObject::BTraceCreate()
	{
	BTraceContext8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectCreate,this,iSizeInPages);
	}

TUint DMemoryObject::PagingManagerData(TUint aIndex)
	{
	TRACE2(("DMemoryObject[0x%08x]::PagingManagerData(0x%x)",this,aIndex));
	__NK_ASSERT_DEBUG(IsDemandPaged());
	TUint value = iPages.PagingManagerData(aIndex);
	TRACE2(("DMemoryObject[0x%08x]::PagingManagerData(0x%x) returns 0x%x",this,aIndex,value));
	return value;
	}

void DMemoryObject::SetPagingManagerData(TUint aIndex, TUint aValue)
	{
	TRACE(("DMemoryObject[0x%08x]::SetPagingManagerData(0x%x,0x%08x)",this,aIndex,aValue));
	__NK_ASSERT_DEBUG(IsDemandPaged());
	iPages.SetPagingManagerData(aIndex, aValue);
	__NK_ASSERT_DEBUG(iPages.PagingManagerData(aIndex)==aValue);
	}

//
// DCoarseMemory::DPageTables
//

DCoarseMemory::DPageTables::DPageTables(DCoarseMemory* aMemory, TInt aNumPts, TUint aPteType)
	: iMemory(aMemory), iPteType(aPteType), iPermanenceCount(0), iNumPageTables(aNumPts)
	{
	aMemory->Open();
	iBlankPte = Mmu::BlankPte(aMemory->Attributes(),aPteType);
	}

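/**
Create a DPageTables object covering aNumPages pages (a whole number of
chunks) of aMemory, for mappings of PTE type aPteType. The object is linked
into aMemory->iPageTables before Construct() populates the page tables, so
that concurrent page moves can find the page table entries via iPageTables.
*/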
DCoarseMemory::DPageTables* DCoarseMemory::DPageTables::New(DCoarseMemory* aMemory, TUint aNumPages, TUint aPteType)
	{
	TRACE2(("DCoarseMemory::DPageTables::New(0x%08x,0x%x,0x%08x)",aMemory, aNumPages, aPteType));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
	__NK_ASSERT_DEBUG((aNumPages&(KChunkMask>>KPageShift))==0);
	TUint numPts = aNumPages>>(KChunkShift-KPageShift);
	DPageTables* self = (DPageTables*)Kern::AllocZ(sizeof(DPageTables)+(numPts-1)*sizeof(TPte*));
	if(self)
		{
		new (self) DPageTables(aMemory,numPts,aPteType);
		// Add this page tables object to the memory object before we update any
		// page table entries. To ensure that if any of aMemory's pages with
		// corresponding page table entries in self are moved during Construct(),
		// DCoarseMemory::RemapPage() will be able to find the page table entries
		// to update via iPageTables.
		__NK_ASSERT_DEBUG(!aMemory->iPageTables[aPteType]);
		aMemory->iPageTables[aPteType] = self;
		TInt r = self->Construct();
		if(r!=KErrNone)
			{
			aMemory->iPageTables[aPteType] = 0;
			self->Close();
			self = 0;
			}
		}
	TRACE2(("DCoarseMemory::DPageTables::New(0x%08x,0x%x,0x%08x) returns 0x%08x",aMemory, aNumPages, aPteType, self));
	return self;
	}

TInt DCoarseMemory::DPageTables::Construct()
	{
	if(iMemory->IsDemandPaged())
		{
		// do nothing, allow pages to be mapped on demand...
		return KErrNone;
		}

	RPageArray::TIter pageIter;
	iMemory->iPages.FindStart(0,iMemory->iSizeInPages,pageIter);

	// map pages...
	TInt r = KErrNone;
	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.Find(pageList);
		if(!n)
			break; // done

		// map some pages...
		r = MapPages(pageList);

		// done with pages...
		pageIter.FindRelease(n);

		if(r!=KErrNone)
			break;
		}

	iMemory->iPages.FindEnd(0,iMemory->iSizeInPages);

	return r;
	}

void DCoarseMemory::DPageTables::Close()
	{
	__NK_ASSERT_DEBUG(CheckCloseIsSafe());
	MmuLock::Lock();
	if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) != 1)
		{
		MmuLock::Unlock();
		return;
		}
	DCoarseMemory* memory = iMemory;
	if(memory)
		{
		iMemory->iPageTables[iPteType] = 0;
		iMemory = 0;
		}
	MmuLock::Unlock();
	if(memory)
		memory->Close();
	delete this;
	}

void DCoarseMemory::DPageTables::AsyncClose()
	{
	__ASSERT_CRITICAL
#ifdef _DEBUG
	NFastMutex* fm = NKern::HeldFastMutex();
	if(fm)
		{
		Kern::Printf("DCoarseMemory::DPageTables::[0x%08x]::AsyncClose() fast mutex violation %M",this,fm);
		__NK_ASSERT_DEBUG(0);
		}
#endif

	MmuLock::Lock();
	if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) != 1)
		{
		MmuLock::Unlock();
		return;
		}
	DCoarseMemory* memory = iMemory;
	if(memory)
		{
		iMemory->iPageTables[iPteType] = 0;
		iMemory = 0;
		}
	MmuLock::Unlock();
	if(memory)
		memory->AsyncClose();
	AsyncDelete();
	}

DCoarseMemory::DPageTables::~DPageTables()
	{
	TRACE2(("DCoarseMemory::DPageTables[0x%08x]::~DPageTables()",this));
	__NK_ASSERT_DEBUG(!iMemory);
	__NK_ASSERT_DEBUG(iMappings.IsEmpty());
	TUint i=0;
	while(i<iNumPageTables)
		{
		TPte* pt = iTables[i];
		if(pt)
			{
			iTables[i] = 0;
			::PageTables.Lock();
			::PageTables.Free(pt);
			::PageTables.Unlock();
			}
		++i;
		}
	}

TPte* DCoarseMemory::DPageTables::GetOrAllocatePageTable(TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	// get page table...
	TPte* pt = GetPageTable(aChunkIndex);
	if(!pt)
		pt = AllocatePageTable(aChunkIndex, iMemory->IsDemandPaged());

	return pt;
	}

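/**
As GetOrAllocatePageTable(TUint), but also pin the returned page table using
aPinArgs. The loop handles the page table changing while locks are dropped
to replenish aPinArgs: a previously pinned table that is no longer current
is unpinned and the lookup retried.
*/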
TPte* DCoarseMemory::DPageTables::GetOrAllocatePageTable(TUint aChunkIndex, TPinArgs& aPinArgs)
	{
	__NK_ASSERT_DEBUG(aPinArgs.iPinnedPageTables);

	if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable))
		return 0;

	TPte* pinnedPt = 0;
	for(;;)
		{
		TPte* pt = GetOrAllocatePageTable(aChunkIndex);

		if(pinnedPt && pinnedPt!=pt)
			{
			// previously pinned page table not needed...
			::PageTables.UnpinPageTable(pinnedPt,aPinArgs);

			// make sure we have memory for next pin attempt...
			MmuLock::Unlock();
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);
			if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable)) // if out of memory...
				{
				// make sure we free any unneeded page table we allocated...
				if(pt)
					FreePageTable(aChunkIndex);
				MmuLock::Lock();
				return 0;
				}
			MmuLock::Lock();
			}

		if(!pt)
			return 0; // out of memory

		if(pt==pinnedPt)
			{
			// we got a page table and it was pinned...
			*aPinArgs.iPinnedPageTables++ = pt;
			++aPinArgs.iNumPinnedPageTables;
			return pt;
			}

		// don't pin page table if it's not paged (e.g. unpaged part of ROM)...
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(!pti->IsDemandPaged())
			return pt;

		// pin the page table...
		if (::PageTables.PinPageTable(pt,aPinArgs) != KErrNone)
			{
			// Couldn't pin the page table...
			MmuLock::Unlock();
			// make sure we free any unneeded page table we allocated...
			FreePageTable(aChunkIndex);
			MmuLock::Lock();
			return 0;
			}
		pinnedPt = pt;
		}
	}

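/**
Allocate a page table for chunk aChunkIndex, retrying until a table is
present in iTables or allocation fails. MmuLock is dropped while the page
table allocator lock is taken, so the iTables slot is re-checked on each
pass. If aPermanent, the table's permanence count is also raised.
*/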
TPte* DCoarseMemory::DPageTables::AllocatePageTable(TUint aChunkIndex, TBool aDemandPaged, TBool aPermanent)
	{
	TRACE2(("DCoarseMemory::DPageTables[0x%08x]::AllocatePageTable(0x%08x,%d,%d)",this,aChunkIndex,aDemandPaged,aPermanent));

	TPte* pt;
	do
		{
		// acquire page table lock...
		MmuLock::Unlock();
		::PageTables.Lock();

		// see if we still need to allocate a page table...
		pt = iTables[aChunkIndex];
		if(!pt)
			{
			// allocate page table...
			pt = ::PageTables.Alloc(aDemandPaged);
			if(!pt)
				{
				// out of memory...
				::PageTables.Unlock();
				MmuLock::Lock();
				return 0;
				}
			AssignPageTable(aChunkIndex,pt);
			}

		// release page table lock...
		::PageTables.Unlock();
		MmuLock::Lock();

		// check again...
		pt = iTables[aChunkIndex];
		}
	while(!pt);

	// we have a page table...
	if(aPermanent)
		{
		__NK_ASSERT_ALWAYS(!aDemandPaged);
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		pti->IncPermanenceCount();
		}
	return pt;
	}

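/**
Make aPageTable the page table for chunk aChunkIndex: write its physical
address into the PDE of every attached mapping covering the chunk, then
publish it in iTables. iTables must be updated before MmuLock is released
so a newly added mapping cannot map the chunk with a different page table.
*/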
void DCoarseMemory::DPageTables::AssignPageTable(TUint aChunkIndex, TPte* aPageTable)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());

	MmuLock::Lock();

	// get physical address of page table now, this can't change whilst we have the page table allocator mutex...
	TPhysAddr ptPhys = Mmu::PageTablePhysAddr(aPageTable);

	// update mappings with new page table...
	TUint offset = aChunkIndex<<(KChunkShift-KPageShift);
	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
	TUint flash = 0;
	while(mapping)
		{
		TUint size = mapping->iSizeInPages;
		TUint start = offset-mapping->iStartIndex;
		if(start<size && !mapping->BeingDetached())
			{
			// page table is used by this mapping, so set PDE...
			TLinAddr linAddrAndOsAsid = mapping->LinAddrAndOsAsid()+start*KPageSize;
			TPde* pPde = Mmu::PageDirectoryEntry(linAddrAndOsAsid&KPageMask,linAddrAndOsAsid);
			TPde pde = ptPhys|mapping->BlankPde();
#ifdef __USER_MEMORY_GUARDS_ENABLED__
			if (mapping->IsUserMapping())
				pde = PDE_IN_DOMAIN(pde, USER_MEMORY_DOMAIN);
#endif
			TRACE2(("!PDE %x=%x",pPde,pde));
			__NK_ASSERT_DEBUG(((*pPde^pde)&~KPdeMatchMask)==0 || *pPde==KPdeUnallocatedEntry);
			*pPde = pde;
			SinglePdeUpdated(pPde);

			++flash; // increase flash rate because we've done quite a bit more work
			}
		iMappings.Unlock();
		MmuLock::Flash(flash,KMaxMappingsInOneGo);
		iMappings.Lock();
		mapping = (DMemoryMapping*)iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();

	// next, assign page table to us...
	// NOTE: Must happen before MmuLock is released after reaching the end of the mapping list
	// otherwise it would be possible for a new mapping to be added and mapped before we manage
	// to update iTables with the page table it should use.
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
	pti->SetCoarse(iMemory,aChunkIndex,iPteType);
	__NK_ASSERT_DEBUG(!iTables[aChunkIndex]);
	iTables[aChunkIndex] = aPageTable; // new mappings can now see the page table

	MmuLock::Unlock();
	}

void DCoarseMemory::DPageTables::FreePageTable(TUint aChunkIndex)
	{
	TRACE2(("DCoarseMemory::DPageTables[0x%08x]::FreePageTable(0x%08x)",this,aChunkIndex));

	// acquire locks...
	::PageTables.Lock();
	MmuLock::Lock();

	// test if page table still needs freeing...
	TPte* pt = iTables[aChunkIndex];
	if(pt)
		{
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(pti->PageCount()==0 && pti->PermanenceCount()==0)
			{
			// page table needs freeing...
			UnassignPageTable(aChunkIndex);
			MmuLock::Unlock();
			::PageTables.Free(pt);
			::PageTables.Unlock();
			return;
			}
		}

	// page table doesn't need freeing...
	MmuLock::Unlock();
	::PageTables.Unlock();
	return;
	}

void DCoarseMemory::StealPageTable(TUint aChunkIndex, TUint aPteType)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(iPageTables[aPteType]);
	iPageTables[aPteType]->StealPageTable(aChunkIndex);
	}

void DCoarseMemory::DPageTables::StealPageTable(TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
#ifdef _DEBUG
	TPte* pt = iTables[aChunkIndex];
	__NK_ASSERT_DEBUG(pt);
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
	__NK_ASSERT_DEBUG(pti->PageCount()==0);
	__NK_ASSERT_DEBUG(pti->PermanenceCount()==0);
#endif
	UnassignPageTable(aChunkIndex);
	}

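/**
Detach the page table for chunk aChunkIndex: clear the iTables slot first,
so new mappings and memory commits are forced to allocate a fresh table
(blocking on the PageTablesLock we hold), then clear the corresponding PDE
in every attached mapping.
*/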
void DCoarseMemory::DPageTables::UnassignPageTable(TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

#ifdef _DEBUG
	TPhysAddr ptPhys = Mmu::PageTablePhysAddr(iTables[aChunkIndex]);
#endif

	// zero page table pointer immediately so new mappings or memory commits will be forced to
	// create a new one (which will block until we've finished here because it also needs the
	// PageTablesLock)...
	iTables[aChunkIndex] = 0;

	// remove page table from mappings...
	TUint offset = aChunkIndex<<(KChunkShift-KPageShift);
	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
	TUint flash = 0;
	while(mapping)
		{
		__NK_ASSERT_DEBUG(iTables[aChunkIndex]==0); // can't have been recreated because we hold PageTablesLock
		TUint size = mapping->iSizeInPages;
		TUint start = offset-mapping->iStartIndex;
		if(start<size)
			{
			// page table is used by this mapping, so clear PDE...
			TLinAddr linAddrAndOsAsid = mapping->LinAddrAndOsAsid()+start*KPageSize;
			TPde* pPde = Mmu::PageDirectoryEntry(linAddrAndOsAsid&KPageMask,linAddrAndOsAsid);
			TPde pde = KPdeUnallocatedEntry;
			TRACE2(("!PDE %x=%x",pPde,pde));
			__NK_ASSERT_DEBUG(*pPde==pde || (*pPde&~KPageTableMask)==ptPhys);
			*pPde = pde;
			SinglePdeUpdated(pPde);

			++flash; // increase flash rate because we've done quite a bit more work
			}
		iMappings.Unlock();
		MmuLock::Flash(flash,KMaxMappingsInOneGo);
		iMappings.Lock();
		mapping = (DMemoryMapping*)iter.Next();
		}
	iter.Finish();

	iMappings.Unlock();
	}

TInt DCoarseMemory::DPageTables::AllocatePermanentPageTables()
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(iMemory));
	__NK_ASSERT_ALWAYS(!iMemory->IsDemandPaged());

	if(iPermanenceCount++)
		{
		// page tables already marked permanent, so end...
		return KErrNone;
		}

	// allocate all page tables...
	MmuLock::Lock();
	TUint flash = 0;
	TUint i;
	for(i=0; i<iNumPageTables; ++i)
		{
		TPte* pt = iTables[i];
		if(pt)
			{
			// already have page table...
			SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
			pti->IncPermanenceCount();
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
			}
		else
			{
			// allocate new page table...
			pt = AllocatePageTable(i,EFalse,ETrue);
			if(!pt)
				{
				MmuLock::Unlock();
				--iPermanenceCount;
				FreePermanentPageTables(0,i);
				return KErrNoMemory;
				}
			}
		}
	MmuLock::Unlock();

	return KErrNone;
	}

void DCoarseMemory::DPageTables::FreePermanentPageTables(TUint aChunkIndex, TUint aChunkCount)
	{
	MmuLock::Lock();

	TUint flash = 0;
	TUint i;
	for(i=aChunkIndex; i<aChunkIndex+aChunkCount; ++i)
		{
		TPte* pt = iTables[i];
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(pti->DecPermanenceCount() || pti->PageCount())
			{
			// still in use...
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
			}
		else
			{
			// page table no longer used for anything...
			MmuLock::Unlock();
			FreePageTable(i);
			MmuLock::Lock();
			}
		}

	MmuLock::Unlock();
	}

void DCoarseMemory::DPageTables::FreePermanentPageTables()
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(iMemory));

	if(--iPermanenceCount)
		{
		// page tables still permanent, so end...
		return;
		}

	FreePermanentPageTables(0,iNumPageTables);
	}

TInt DCoarseMemory::DPageTables::AddMapping(DCoarseMapping* aMapping)
	{
	TRACE(("DCoarseMemory::DPageTables[0x%08x]::AddMapping(0x%08x)",this,aMapping));
	__NK_ASSERT_DEBUG(aMapping->IsCoarse());
	Open();
	MmuLock::Lock();
	iMappings.Lock();
	aMapping->LinkToMemory(iMemory,iMappings);
	iMappings.Unlock();
	MmuLock::Unlock();
	return KErrNone;
	}

void DCoarseMemory::DPageTables::RemoveMapping(DCoarseMapping* aMapping)
	{
	aMapping->UnlinkFromMemory(iMappings);
	Close();
	}

void DCoarseMemory::DPageTables::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TUint pteIndex = aIndex & (KChunkMask>>KPageShift);

	// get address of page table...
	MmuLock::Lock();
	TUint i = aIndex>>(KChunkShift-KPageShift);
	TPte* pPte = GetPageTable(i);

	if (!pPte)
		{// This page has been unmapped so just return.
		MmuLock::Unlock();
		return;
		}

	// remap the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArray, iBlankPte);

	MmuLock::Unlock();

	if (aInvalidateTLB)
		FlushTLB(aIndex, aIndex + 1);
	}

TInt DCoarseMemory::DPageTables::MapPages(RPageArray::TIter aPages)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of page table...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte = GetOrAllocatePageTable(i);

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			return KErrNoMemory;
			}

		// map some pages...
		pPte += pteIndex;
		TBool keepPt = Mmu::MapPages(pPte, n, pages, iBlankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(i);

		// move on...
		aPages.Skip(n);
		}

	return KErrNone;
	}

void DCoarseMemory::DPageTables::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	TUint startIndex = aPages.Index();

	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of PTE for pages...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte = iTables[i];
		if(pPte)
			{
			// unmap some pages...
			pPte += pteIndex;
			TBool keepPt = Mmu::UnmapPages(pPte,n,pages);
			MmuLock::Unlock();

			// free page table if no longer needed...
			if(!keepPt)
				FreePageTable(i);
			}
		else
			{
			// no page table found...
			MmuLock::Unlock();
			}

		// move on...
		aPages.Skip(n);
		}

	FlushTLB(startIndex,aPages.IndexEnd());
	}

void DCoarseMemory::DPageTables::RestrictPagesNA(RPageArray::TIter aPages)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	TUint startIndex = aPages.Index();

	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of PTE for pages...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte = iTables[i];
		if(pPte)
			{
			// restrict some pages...
			pPte += pteIndex;
			Mmu::RestrictPagesNA(pPte,n,pages);
			}
		MmuLock::Unlock();

		// move on...
		aPages.Skip(n);
		}

	FlushTLB(startIndex,aPages.IndexEnd());
	}

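/**
Map paged-in pages into this page tables object, allocating (and, when
aPinArgs requests it, pinning) page tables as needed. Fails with
KErrNotFound if aMapping has been reused (its map instance count no longer
matches aMapInstanceCount) and with KErrNoMemory on allocation failure.
*/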
TInt DCoarseMemory::DPageTables::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs,
										DMemoryMappingBase* aMapping, TUint aMapInstanceCount)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	TBool pinPageTable = aPinArgs.iPinnedPageTables!=0; // check if we need to pin the first page table
	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);
		if(pteIndex==0)
			pinPageTable = aPinArgs.iPinnedPageTables!=0; // started a new page table, check if we need to pin it

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// make sure we have memory to pin the page table if required...
		if(pinPageTable)
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);

		// get address of page table...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte;
		if(pinPageTable)
			pPte = GetOrAllocatePageTable(i,aPinArgs);
		else
			pPte = GetOrAllocatePageTable(i);

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			return KErrNoMemory;
			}

		if (aMapInstanceCount != aMapping->MapInstanceCount())
			{// The mapping that took the page fault has been reused.
			MmuLock::Unlock();
			FreePageTable(i); // This will only free if this is the only pt referencer.
			return KErrNotFound;
			}

		// map some pages...
		pPte += pteIndex;
		TPte blankPte = iBlankPte;
		if(aPinArgs.iReadOnly)
			blankPte = Mmu::MakePteInaccessible(blankPte,true);
		TBool keepPt = Mmu::PageInPages(pPte, n, pages, blankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(i);

		// move on...
		aPages.Skip(n);
		pinPageTable = false;
		}

	return KErrNone;
	}

TBool DCoarseMemory::DPageTables::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	TUint pteIndex = aIndex & (KChunkMask >> KPageShift);

	// get address of page table...
	TUint i = aIndex >> (KChunkShift - KPageShift);
	TPte* pPte = GetPageTable(i);

	// Check the page is still mapped...
	if (!pPte)
		return EFalse;

	// map the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArrayPtr, iBlankPte);
	return ETrue;
	}

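/**
Invalidate TLB entries for pages [aStartIndex,aEndIndex) in every attached
mapping. This is only compiled for fine-grained TLB maintenance; with
COARSE_GRAINED_TLB_MAINTENANCE the callers invalidate the TLB as a whole.
*/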
void DCoarseMemory::DPageTables::FlushTLB(TUint aStartIndex, TUint aEndIndex)
	{
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
	while(mapping)
		{
		// get region which overlaps the mapping...
		TUint start = mapping->iStartIndex;
		TUint end = start+mapping->iSizeInPages;
		if(start<aStartIndex)
			start = aStartIndex;
		if(end>aEndIndex)
			end = aEndIndex;
		if(start>=end)
			{
			// the mapping doesn't contain the pages...
			iMappings.Unlock();
			}
		else
			{
			// flush TLB for pages in the mapping...
			TUint size = end-start;
			start -= mapping->iStartIndex;
			TLinAddr addr = mapping->LinAddrAndOsAsid()+start*KPageSize;
			TLinAddr endAddr = addr+size*KPageSize;
			iMappings.Unlock();
			do
				{
				InvalidateTLBForPage(addr);
				}
			while((addr+=KPageSize)<endAddr);
			}
		iMappings.Lock();
		mapping = (DMemoryMapping*)iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();
#endif
	}

//
// DCoarseMemory
//

DCoarseMemory::DCoarseMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: DMemoryObject(aManager,ECoarseObject,aSizeInPages,aAttributes,aCreateFlags)
	{
	}

DCoarseMemory* DCoarseMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	DCoarseMemory* self = new DCoarseMemory(aManager, aSizeInPages, aAttributes, aCreateFlags);
	if(self)
		{
		if(self->Construct()==KErrNone)
			return self;
		self->Close();
		}
	return 0;
	}

DCoarseMemory::~DCoarseMemory()
	{
	TRACE2(("DCoarseMemory[0x%08x]::~DCoarseMemory()",this));
#ifdef _DEBUG
	for(TUint i=0; i<ENumPteTypes; i++)
		{
		__NK_ASSERT_DEBUG(!iPageTables[i]);
		}
#endif
	}

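/**
Create a mapping object for the region [aIndex,aIndex+aCount). A coarse
mapping (which shares this object's page tables) is only possible when the
region is chunk-aligned; otherwise a fine mapping is used.
*/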
DMemoryMapping* DCoarseMemory::CreateMapping(TUint aIndex, TUint aCount)
	{
	if (((aIndex|aCount)&(KChunkMask>>KPageShift))==0)
		return new DCoarseMapping();
	else
		return new DFineMapping();
	}

TInt DCoarseMemory::MapPages(RPageArray::TIter aPages)
	{
	TRACE2(("DCoarseMemory[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

	// map pages in all page tables for coarse mapping...
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			tables->Open();
			MmuLock::Unlock();
			TInt r = tables->MapPages(aPages);
			tables->AsyncClose();
			if(r!=KErrNone)
				return r;
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// map page in all fine mappings...
	return DMemoryObject::MapPages(aPages);
	}

void DCoarseMemory::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TRACE2(("DCoarseMemory[0x%08x]::RemapPage() index=0x%x",this, aIndex));

	// remap pages in all page tables for coarse mapping...
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			tables->Open();
			MmuLock::Unlock();
			tables->RemapPage(aPageArray, aIndex, aInvalidateTLB);
			tables->AsyncClose();
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// remap page in all fine mappings...
	DMemoryObject::RemapPage(aPageArray, aIndex, aInvalidateTLB);
	}

void DCoarseMemory::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	TRACE2(("DCoarseMemory[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));

	if(!aPages.Count())
		return;

	// unmap pages from all page tables for coarse mapping...
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			tables->Open();
			MmuLock::Unlock();
			tables->UnmapPages(aPages,aDecommitting);
			tables->AsyncClose();
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// unmap pages from all fine mappings...
	DMemoryObject::UnmapPages(aPages,aDecommitting);
	}

void DCoarseMemory::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
	{
	TRACE2(("DCoarseMemory[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));
	__ASSERT_COMPILE(ERestrictPagesForMovingFlag != ERestrictPagesNoAccessForMoving);

	if(!aPages.Count())
		return;

	if (aRestriction != ERestrictPagesForMovingFlag)
		{// restrict pages in all the page tables for the coarse mapping...
		MmuLock::Lock();
		TUint pteType = 0;
		do
			{
			DPageTables* tables = iPageTables[pteType];
			if(tables)
				{
				tables->Open();
				MmuLock::Unlock();
				tables->RestrictPagesNA(aPages);
				tables->AsyncClose();
				MmuLock::Lock();
				}
			}
		while(++pteType<ENumPteTypes);
		MmuLock::Unlock();
		}

	// restrict pages in all fine mappings, will also check for pinned mappings...
	DMemoryObject::RestrictPages(aPages,aRestriction);
	}

TPte* DCoarseMemory::GetPageTable(TUint aPteType, TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(aChunkIndex < (iSizeInPages >> KPagesInPDEShift));
	return iPageTables[aPteType]->GetPageTable(aChunkIndex);
	}

TInt DCoarseMemory::PageIn(DCoarseMapping* aMapping, RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	DPageTables* tables = iPageTables[aMapping->PteType()];
	tables->Open();

	MmuLock::Unlock();

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = aMapping->Base()+(aPages.Index()-aMapping->iStartIndex)*KPageSize;
	TLinAddr endAddr = startAddr+aPages.Count()*KPageSize;
#endif

	TInt r = tables->PageIn(aPages, aPinArgs, aMapping, aMapInstanceCount);

	// clean TLB...
#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	InvalidateTLBForAsid(aMapping->OsAsid());
#else
	TLinAddr addr = startAddr+aMapping->OsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif

	tables->AsyncClose();

	return r;
	}

TBool DCoarseMemory::MovingPageIn(DCoarseMapping* aMapping, TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	DCoarseMemory::DPageTables* tables = iPageTables[aMapping->PteType()];
	return tables->MovingPageIn(aPageArrayPtr, aIndex);
	}

TPte* DCoarseMemory::FindPageTable(DCoarseMapping* aMapping, TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	DCoarseMemory::DPageTables* tables = iPageTables[aMapping->PteType()];

	// get address of page table...
	TUint i = aMemoryIndex >> (KChunkShift - KPageShift);
	return tables->GetPageTable(i);
	}

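/**
Adopt pages already mapped at aBase (typically by the bootstrap) into this
memory object, walking the region's page directory entries and taking over
any section or page-table mappings found. Only used during boot, so holding
MmuLock for the whole walk is acceptable (as noted below).
*/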
TInt DCoarseMemory::ClaimInitialPages(TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages)
	{
	TRACE(("DCoarseMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));

	// validate arguments...
	if(aBase&KChunkMask || aBase<KGlobalMemoryBase)
		return KErrArgument;
	if(aSize&KPageMask || aSize>iSizeInPages*KPageSize)
		return KErrArgument;

	// get DPageTables object...
	TUint pteType = Mmu::PteType(aPermissions,true);
	MemoryObjectLock::Lock(this);
	DPageTables* tables = GetOrAllocatePageTables(pteType);
	MemoryObjectLock::Unlock(this);
	__NK_ASSERT_DEBUG(tables);

	// check and allocate page array entries...
	RPageArray::TIter pageIter;
	TInt r = iPages.AddStart(0,aSize>>KPageShift,pageIter);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	// hold MmuLock for long time, shouldn't matter as this is only done during boot
	::PageTables.Lock();
	MmuLock::Lock();

	TPte blankPte = tables->iBlankPte;
	TPte** pPt = tables->iTables;
	TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);
	TUint offset = 0;
	TUint size = aSize;
	while(size)
		{
		TPde pde = *pPde;
		TRACE(("DCoarseMemory::ClaimInitialPages: %08x: 0x%08x",aBase+offset,pde));

		TPte* pPte = NULL;
		SPageTableInfo* pti = NULL;

		if (Mmu::PdeMapsSection(pde))
			{
			TPhysAddr sectionBase = Mmu::SectionBaseFromPde(pde);
			TRACE((" chunk is section mapped, base at %08x", sectionBase));
			__NK_ASSERT_DEBUG(sectionBase != KPhysAddrInvalid);

			TPde pde = sectionBase | Mmu::BlankSectionPde(Attributes(),pteType);
			__NK_ASSERT_DEBUG(((*pPde^pde)&~KPdeMatchMask)==0);
			*pPde = pde;
			SinglePdeUpdated(pPde);
			InvalidateTLB();

			// We allocate and populate a page table for the section even though it won't be mapped
			// initially because the presence of the page table is used to check whether RAM is
			// mapped in a chunk, and because it makes it possible to break the section mapping
			// without allocating memory. This may change in the future.

			// Note these page tables are always unpaged here regardless of the paged bit in iFlags
			// (e.g. the ROM object is marked as paged despite its initial pages being unpaged).
			pPte = tables->AllocatePageTable(offset >> KChunkShift, EFalse, EFalse);
			if (!pPte)
				{
				MmuLock::Unlock();
				return KErrNoMemory;
				}
			pti = SPageTableInfo::FromPtPtr(pPte);
			}
		else if (Mmu::PdeMapsPageTable(pde))
			{
			pPte = Mmu::PageTableFromPde(*pPde);
			TRACE((" page table found at %08x", pPte));
			__NK_ASSERT_DEBUG(pPte);
			pti = SPageTableInfo::FromPtPtr(pPte);
			pti->SetCoarse(this,offset>>KChunkShift,pteType);
			}

		*pPt++ = pPte;
		++pPde;

		TUint numPages = 0;
		do
			{
			TPhysAddr pagePhys = Mmu::LinearToPhysical(aBase+offset);
			TPte pte;
			if(pagePhys==KPhysAddrInvalid)
				{
				if(size)
					{
					__NK_ASSERT_ALWAYS(aAllowGaps); // we have a gap, check this is allowed
					pageIter.Skip(1);
					}

				pte = KPteUnallocatedEntry;
				}
			else
				{
				__NK_ASSERT_ALWAYS(size); // pages can't be mapped above aSize

				pageIter.Add(1,&pagePhys);

				SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
				__NK_ASSERT_ALWAYS(pi || aAllowNonRamPages);
				if(pi)
					{
					__NK_ASSERT_ALWAYS(pi->Type()==SPageInfo::EFixed);
					pi->SetManaged(this,offset>>KPageShift,PageInfoFlags());
					}

				++numPages;
				pte = pagePhys|blankPte;
				}

			if(pPte)
				{
|
1665 |
TRACE2(("!PTE %x=%x (was %x)",pPte,pte,*pPte)); |
|
1666 |
__NK_ASSERT_DEBUG(((*pPte^pte)&~KPteMatchMask)==0 || *pPte==KPteUnallocatedEntry); |
|
1667 |
*pPte = pte; |
|
1668 |
CacheMaintenance::SinglePteUpdated((TLinAddr)pPte); |
|
1669 |
++pPte; |
|
1670 |
} |
|
1671 |
||
1672 |
offset += KPageSize; |
|
1673 |
if(size) |
|
1674 |
size -= KPageSize; |
|
1675 |
} |
|
1676 |
while(offset&(KChunkMask&~KPageMask)); |
|
1677 |
||
1678 |
if(pti) |
|
1679 |
{ |
|
1680 |
pti->IncPageCount(numPages); |
|
1681 |
TRACE2(("pt %x page count=%d",TLinAddr(pPte)-KPageTableSize,numPages)); |
|
1682 |
__NK_ASSERT_DEBUG(pti->CheckPageCount()); |
|
1683 |
} |
|
1684 |
} |
|
1685 |
||
1686 |
InvalidateTLBForAsid(KKernelOsAsid); |
|
1687 |
||
1688 |
MmuLock::Unlock(); |
|
1689 |
::PageTables.Unlock(); |
|
1690 |
||
1691 |
// release page array entries... |
|
1692 |
iPages.AddEnd(0,aSize>>KPageShift); |
|
1693 |
||
1694 |
return KErrNone; |
|
1695 |
} |
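
// Summary of ClaimInitialPages() above: for each chunk of the region it either
// (a) rewrites a section-mapped PDE with this object's canonical attributes and
// allocates a page table that is not yet mapped, so the section mapping can
// later be broken without allocating memory, or (b) adopts an existing
// boot-time page table via SetCoarse(). Every page found is added to iPages,
// and pages backed by managed RAM have their SPageInfo claimed with
// SetManaged(). The AddStart()/AddEnd() pair brackets population of the page
// array.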

DCoarseMemory::DPageTables* DCoarseMemory::GetOrAllocatePageTables(TUint aPteType)
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

	MmuLock::Lock();
	DPageTables* tables = iPageTables[aPteType];
	if(tables)
		tables->Open();
	MmuLock::Unlock();

	if(!tables)
		{
		// allocate a new one if required...
		tables = DPageTables::New(this, iSizeInPages, aPteType);
		}

	return tables;
	}
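
// Note on GetOrAllocatePageTables(): both paths return an opened reference;
// DPageTables::New() presumably publishes the new object to
// iPageTables[aPteType] itself. The memory object lock (asserted on entry) is
// what stops two threads allocating tables for the same PTE type concurrently.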

TInt DCoarseMemory::AddMapping(DMemoryMappingBase* aMapping)
	{
	if(!aMapping->IsCoarse())
		{
		// not coarse mapping...
		return DMemoryObject::AddMapping(aMapping);
		}

	__NK_ASSERT_DEBUG(aMapping->IsPinned()==false); // coarse mappings can't pin

	// Check mapping allowed. Must hold memory object lock to prevent changes
	// to object's restrictions.
	MemoryObjectLock::Lock(this);
	TInt r = CheckNewMapping(aMapping);
	if(r!=KErrNone)
		{
		MemoryObjectLock::Unlock(this);
		return r;
		}

	// get DPageTables for mapping...
	DPageTables* tables = GetOrAllocatePageTables(aMapping->PteType());

	// Safe to release here as no restrictions on this type of mapping can be added as
	// we now have an iPageTables entry for this type of mapping.
	MemoryObjectLock::Unlock(this);
	if(!tables)
		return KErrNoMemory;

	// add mapping to DPageTables...
	r = tables->AddMapping((DCoarseMapping*)aMapping);
	if(r==KErrNone)
		{
		// allocate permanent page tables if required...
		if(aMapping->Flags()&DMemoryMapping::EPermanentPageTables)
			{
			MemoryObjectLock::Lock(this);
			r = tables->AllocatePermanentPageTables();
			MemoryObjectLock::Unlock(this);

			if(r==KErrNone)
				__e32_atomic_ior_ord8(&aMapping->Flags(), (TUint8)DMemoryMapping::EPageTablesAllocated);
			else
				tables->RemoveMapping((DCoarseMapping*)aMapping);
			}
		}

	tables->Close();

	return r;
	}
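
// Note on AddMapping() above: the memory object lock is dropped before
// tables->AddMapping() and re-taken only for the optional
// AllocatePermanentPageTables() step; as the in-line comment explains, once
// iPageTables[] holds an entry for this PTE type, no restriction invalidating
// the earlier CheckNewMapping() result can be added. If allocating permanent
// page tables fails, the mapping is backed out with RemoveMapping().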

void DCoarseMemory::RemoveMapping(DMemoryMappingBase* aMapping)
	{
	if(!aMapping->IsCoarse())
		{
		// not coarse mapping...
		DMemoryObject::RemoveMapping(aMapping);
		return;
		}

	// need a temporary reference on self because we may be removing the last mapping
	// which will delete this...
	Open();

	// get DPageTables the mapping is attached to...
	DPageTables* tables = iPageTables[aMapping->PteType()];
	__NK_ASSERT_DEBUG(tables); // must exist because aMapping has a reference on it

	// free permanent page tables if required...
	if(aMapping->Flags()&DMemoryMapping::EPageTablesAllocated)
		{
		MemoryObjectLock::Lock(this);
		tables->FreePermanentPageTables();
		MemoryObjectLock::Unlock(this);
		}

	// remove mapping from page tables object...
	tables->RemoveMapping((DCoarseMapping*)aMapping);

	Close(); // may delete this memory object
	}
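
// Note: FreePermanentPageTables() is bracketed by the memory object lock,
// mirroring AllocatePermanentPageTables() in AddMapping() above, and the
// surrounding Open()/Close() pair keeps this object alive while what may be
// its last mapping is removed.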

TInt DCoarseMemory::SetReadOnly()
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

	// Search for writable iPageTables entries.
	// We hold the MemoryObjectLock so iPageTables entries can't be added or removed.
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		if((pteType & EPteTypeWritable) && iPageTables[pteType])
			{
			MmuLock::Unlock();
			return KErrInUse;
			}
		}
	while(++pteType < ENumPteTypes);
	MmuLock::Unlock();

	// unmap pages from all fine mappings...
	return DMemoryObject::SetReadOnly();
	}
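
// Note on SetReadOnly() above: the scan relies on the PTE-type encoding, in
// which values with EPteTypeWritable set correspond to writable mappings. A
// live DPageTables of such a type means a writable coarse mapping still
// exists, so the object cannot be made read-only and KErrInUse is returned.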

//
// DFineMemory
//

DFineMemory::DFineMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: DMemoryObject(aManager,0,aSizeInPages,aAttributes,aCreateFlags)
	{
	}


DFineMemory* DFineMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	DFineMemory* self = new DFineMemory(aManager,aSizeInPages,aAttributes,aCreateFlags);
	if(self)
		{
		if(self->Construct()==KErrNone)
			return self;
		self->Close();
		}
	return 0;
	}
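
// Note: New() uses the usual two-phase construction idiom; Construct() is
// presumably provided by DMemoryObject, and Close() rather than delete is
// used on failure so cleanup goes through the reference-counted path.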

DFineMemory::~DFineMemory()
	{
	TRACE2(("DFineMemory[0x%08x]::~DFineMemory",this));
	}

TInt DFineMemory::ClaimInitialPages(TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages)
	{
	TRACE(("DFineMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));
	(void)aPermissions;

	// validate arguments...
	if(aBase&KPageMask || aBase<KGlobalMemoryBase)
		return KErrArgument;
	if(aSize&KPageMask || aSize>iSizeInPages*KPageSize)
		return KErrArgument;

#ifdef _DEBUG
	// calculate 'blankPte', the correct PTE value for pages in this memory object...
	TUint pteType = Mmu::PteType(aPermissions,true);
	TPte blankPte = Mmu::BlankPte(Attributes(),pteType);
#endif

	// get page table...
	TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);
	TPte* pPte = Mmu::PageTableFromPde(*pPde);
	if(!pPte)
		return KErrNone; // no pages mapped

	// check and allocate page array entries...
	RPageArray::TIter pageIter;
	TInt r = iPages.AddStart(0,aSize>>KPageShift,pageIter);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	// hold MmuLock for a long time, shouldn't matter as this is only done during boot
	MmuLock::Lock();

	// setup page table for fine mappings...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
	__NK_ASSERT_DEBUG(pti->CheckPageCount());
	TBool pageTableOk = pti->ClaimFine(aBase&~KChunkMask,KKernelOsAsid);
	__NK_ASSERT_ALWAYS(pageTableOk);
	TRACE(("DFineMemory::ClaimInitialPages page table = 0x%08x",pPte));

	TUint pteIndex = (aBase>>KPageShift)&(KChunkMask>>KPageShift);
	TUint pageIndex = 0;
	TUint size = aSize;
	while(pageIndex<iSizeInPages)
		{
		TPhysAddr pagePhys = Mmu::PtePhysAddr(pPte[pteIndex],pteIndex);
		if(pagePhys==KPhysAddrInvalid)
			{
			if(size)
				{
				__NK_ASSERT_ALWAYS(aAllowGaps); // we have a gap, check this is allowed
				pageIter.Skip(1);
				}

			// check PTE is correct...
			__NK_ASSERT_DEBUG(pPte[pteIndex]==KPteUnallocatedEntry);
			}
		else
			{
			__NK_ASSERT_ALWAYS(size); // pages can't be mapped above aSize

			pageIter.Add(1,&pagePhys);

			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);

			if(!pi)
				__NK_ASSERT_ALWAYS(aAllowNonRamPages);
			else
				{
				__NK_ASSERT_ALWAYS(pi->Type()==SPageInfo::EFixed);
				pi->SetManaged(this,pageIndex,PageInfoFlags());
				}

#ifdef _DEBUG
			// check PTE is correct...
			TPte pte = pagePhys|blankPte;
			__NK_ASSERT_DEBUG(((pPte[pteIndex]^pte)&~KPteMatchMask)==0);
#endif
			}

		// move on to next page...
		++pteIndex;
		__NK_ASSERT_ALWAYS(pteIndex<(KChunkSize>>KPageShift));
		++pageIndex;
		if(size)
			size -= KPageSize;
		}

	MmuLock::Unlock();

	// release page array entries...
	iPages.AddEnd(0,aSize>>KPageShift);

	return KErrNone;
	}
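
// Note on DFineMemory::ClaimInitialPages() above: unlike the coarse variant it
// requires only page (not chunk) alignment, adopts the single boot-time page
// table with ClaimFine(), and asserts that the whole object fits within that
// one page table (pteIndex < KChunkSize>>KPageShift). PTE contents are checked
// against blankPte in debug builds only, since the existing boot mappings are
// adopted as-is rather than rewritten as in the coarse variant.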