|
1 // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies). |
|
2 // All rights reserved. |
|
3 // This component and the accompanying materials are made available |
|
4 // under the terms of the License "Eclipse Public License v1.0" |
|
5 // which accompanies this distribution, and is available |
|
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 // |
|
8 // Initial Contributors: |
|
9 // Nokia Corporation - initial contribution. |
|
10 // |
|
11 // Contributors: |
|
12 // |
|
13 // Description: |
|
14 // |
|
15 |
|
16 #include "memmodel.h" |
|
17 #include "kernel/cache_maintenance.inl" |
|
18 #include <kernel/cache.h> |
|
19 #include <ramalloc.h> |
|
20 #include <defrag.h> |
|
21 #include "mm.h" |
|
22 #include "mmu.h" |
|
23 #include "mpager.h" |
|
24 #include "mmapping.h" |
|
25 #include "mobject.h" |
|
26 #include "mmanager.h" |
|
27 #include "mpagearray.h" |
|
28 |
|
29 |
|
30 // |
|
31 // SPageInfo |
|
32 // |
|
33 |
|
34 // check enough space for page infos... |
|
35 __ASSERT_COMPILE((KPageInfoLinearEnd-KPageInfoLinearBase)/sizeof(SPageInfo)==(1<<(32-KPageShift))); |
|
36 |
|
37 // check KPageInfoShift... |
|
38 __ASSERT_COMPILE(sizeof(SPageInfo)==(1<<KPageInfoShift)); |
|
39 |
|
40 |
|
41 SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress) |
|
42 { |
|
43 __NK_ASSERT_DEBUG((aAddress&KPageMask)==0); |
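	// Note (illustrative): page infos are packed 2^(KPageShift-KPageInfoShift)
	// to a page (128 per page, covering 512KB of RAM, with the usual
	// KPageShift==12 and the 32-byte SPageInfo asserted above), so the
	// page-info page covering aAddress has index
	// aAddress>>(KPageShift+KPageShift-KPageInfoShift), and KPageInfoMap
	// holds one bit per page-info page, set if that page is present.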
|
44 TUint index = aAddress>>(KPageShift+KPageShift-KPageInfoShift); |
|
45 TUint flags = ((TUint8*)KPageInfoMap)[index>>3]; |
|
46 TUint mask = 1<<(index&7); |
|
47 if(!(flags&mask)) |
|
48 return 0; // no SPageInfo for aAddress |
|
49 SPageInfo* info = FromPhysAddr(aAddress); |
|
50 if(info->iType==SPageInfo::EInvalid) |
|
51 return 0; |
|
52 return info; |
|
53 } |
|
54 |
|
55 |
|
56 #ifdef _DEBUG |
|
57 |
|
58 void SPageInfo::CheckAccess(const char* aMessage, TUint aFlags) |
|
59 { |
|
60 if(K::Initialising || NKern::Crashed()) |
|
61 return; |
|
62 |
|
63 if((aFlags&ECheckNotAllocated) && (iType!=EUnknown)) |
|
64 { |
|
65 Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage); |
|
66 __NK_ASSERT_DEBUG(0); |
|
67 goto fail; |
|
68 } |
|
69 |
|
70 if((aFlags&ECheckNotUnused) && (iType==EUnused)) |
|
71 { |
|
72 Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage); |
|
73 __NK_ASSERT_DEBUG(0); |
|
74 goto fail; |
|
75 } |
|
76 |
|
77 if((aFlags&ECheckUnused) && (iType!=EUnused)) |
|
78 { |
|
79 Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage); |
|
80 __NK_ASSERT_DEBUG(0); |
|
81 goto fail; |
|
82 } |
|
83 |
|
84 if((aFlags&ECheckNotPaged) && (iPagedState!=EUnpaged)) |
|
85 { |
|
86 Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iPagedState=%d : %s",this,PhysAddr(),iPagedState,aMessage); |
|
87 __NK_ASSERT_DEBUG(0); |
|
88 goto fail; |
|
89 } |
|
90 |
|
91 if((aFlags&ECheckRamAllocLock) && !RamAllocLock::IsHeld()) |
|
92 { |
|
93 Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage); |
|
94 __NK_ASSERT_DEBUG(0); |
|
95 goto fail; |
|
96 } |
|
97 |
|
98 if((aFlags&ENoCheckMmuLock) || MmuLock::IsHeld()) |
|
99 return; |
|
100 fail: |
|
101 Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x : %s",this,PhysAddr(),aMessage); |
|
102 Mmu::Panic(Mmu::EUnsafePageInfoAccess); |
|
103 } |
|
104 |
|
105 |
|
106 void SPageInfo::Dump() |
|
107 { |
|
108 Kern::Printf("SPageInfo for page %x = %d,%d,%02x,0x%08x,0x%x,%d",PhysAddr(),iType,iPagedState,iFlags,iOwner,iIndex,iPinCount); |
|
109 } |
|
110 |
|
111 #endif |
|
112 |
|
113 |
|
114 |
|
115 // |
|
116 // SPageTableInfo |
|
117 // |
|
118 |
|
119 // check enough space for page table infos... |
|
120 __ASSERT_COMPILE((KPageTableInfoEnd-KPageTableInfoBase)/sizeof(SPageTableInfo) |
|
121 >=(KPageTableEnd-KPageTableBase)/KPageTableSize); |
|
122 |
|
123 // check KPtBlockShift... |
|
124 __ASSERT_COMPILE((sizeof(SPageTableInfo)<<KPtBlockShift)==KPageSize); |
|
125 |
|
126 |
|
127 #ifdef _DEBUG |
|
128 |
|
129 TBool SPageTableInfo::CheckPageCount() |
|
130 { |
|
131 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
132 TPte* pt = PageTable(); |
|
133 TUint realCount = 0; |
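	// Count the non-empty PTEs in this page table; the mask below is just
	// KPageTableMask rounded down to a whole number of TPte entries, so the
	// loop stops when 'pt' wraps onto the next page table boundary.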
|
134 do if(*pt++) ++realCount; |
|
135 while(TLinAddr(pt)&(KPageTableMask/sizeof(TPte)*sizeof(TPte))); |
|
136 if(iPageCount==realCount) |
|
137 return true; |
|
138 Kern::Printf("CheckPageCount Failed: pt=0x%08x count=%d realCount=%d",TLinAddr(pt)-KPageTableSize,iPageCount,realCount); |
|
139 return false; |
|
140 } |
|
141 |
|
142 |
|
143 void SPageTableInfo::CheckChangeUse(const char* aName) |
|
144 { |
|
145 if(K::Initialising) |
|
146 return; |
|
147 if(PageTablesLockIsHeld() && MmuLock::IsHeld()) |
|
148 return; |
|
149 Kern::Printf("SPageTableInfo::CheckChangeUse failed : %s",aName); |
|
150 Mmu::Panic(Mmu::EUnsafePageTableInfoAccess); |
|
151 } |
|
152 |
|
153 |
|
154 void SPageTableInfo::CheckCheckUse(const char* aName) |
|
155 { |
|
156 if(K::Initialising) |
|
157 return; |
|
158 if(PageTablesLockIsHeld() || MmuLock::IsHeld()) |
|
159 return; |
|
160 Kern::Printf("SPageTableInfo::CheckCheckUse failed : %s",aName); |
|
161 Mmu::Panic(Mmu::EUnsafePageTableInfoAccess); |
|
162 } |
|
163 |
|
164 |
|
165 void SPageTableInfo::CheckAccess(const char* aName) |
|
166 { |
|
167 if(K::Initialising) |
|
168 return; |
|
169 if(MmuLock::IsHeld()) |
|
170 return; |
|
171 Kern::Printf("SPageTableInfo::CheckAccess failed : %s",aName); |
|
172 Mmu::Panic(Mmu::EUnsafePageTableInfoAccess); |
|
173 } |
|
174 |
|
175 |
|
176 void SPageTableInfo::CheckInit(const char* aName) |
|
177 { |
|
178 if(K::Initialising) |
|
179 return; |
|
180 if(PageTablesLockIsHeld() && iType==EUnused) |
|
181 return; |
|
182 Kern::Printf("SPageTableInfo::CheckInit failed : %s",aName); |
|
183 Mmu::Panic(Mmu::EUnsafePageTableInfoAccess); |
|
184 } |
|
185 |
|
186 #endif |
|
187 |
|
188 |
|
189 |
|
190 // |
|
191 // RamAllocLock |
|
192 // |
|
193 |
|
194 _LIT(KLitRamAlloc,"RamAlloc"); |
|
195 _LIT(KLitPhysMemSync,"PhysMemSync"); |
|
196 |
|
197 void RamAllocLock::Lock() |
|
198 { |
|
199 Mmu& m = TheMmu; |
|
200 Kern::MutexWait(*m.iRamAllocatorMutex); |
|
201 if(!m.iRamAllocLockCount++) |
|
202 { |
|
203 // first lock, so setup memory fail data... |
|
204 m.iRamAllocFailed = EFalse; |
|
205 __NK_ASSERT_DEBUG(m.iRamAllocInitialFreePages==m.FreeRamInPages()); // free RAM shouldn't have changed whilst the lock was not held
|
206 } |
|
207 } |
|
208 |
|
209 |
|
210 void RamAllocLock::Unlock() |
|
211 { |
|
212 Mmu& m = TheMmu; |
|
213 if(--m.iRamAllocLockCount) |
|
214 { |
|
215 Kern::MutexSignal(*m.iRamAllocatorMutex); |
|
216 return; |
|
217 } |
|
218 TBool failed = m.iRamAllocFailed; |
|
219 TUint initial = m.iRamAllocInitialFreePages; |
|
220 TUint final = m.FreeRamInPages(); |
|
221 m.iRamAllocInitialFreePages = final; // new baseline value |
|
222 TUint changes = K::CheckFreeMemoryLevel(initial*KPageSize,final*KPageSize,failed); |
|
223 if(changes) |
|
224 { |
|
225 __KTRACE_OPT(KMMU,Kern::Printf("RamAllocLock::Unlock() changes=%x",changes)); |
|
226 } |
|
227 Kern::MutexSignal(*m.iRamAllocatorMutex); |
|
228 } |
|
229 |
|
230 |
|
231 TBool RamAllocLock::Flash() |
|
232 { |
|
233 Unlock(); |
|
234 Lock(); |
|
235 return true; // lock was released |
|
236 } |
|
237 |
|
238 |
|
239 TBool RamAllocLock::IsHeld() |
|
240 { |
|
241 Mmu& m = TheMmu; |
|
242 return m.iRamAllocatorMutex->iCleanup.iThread == &Kern::CurrentThread() && m.iRamAllocLockCount; |
|
243 } |
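// Illustrative sketch of the locking protocol expected by the allocation
// functions later in this file (which assert RamAllocLock::IsHeld());
// 'flags' and 'blockZoneId' stand for whatever the caller requires:
//
//   RamAllocLock::Lock();
//   TPhysAddr page;
//   TInt r = TheMmu.AllocRam(&page, 1, flags, EPageFixed, blockZoneId, EFalse);
//   if(r==KErrNone)
//       TheMmu.FreeRam(&page, 1, EPageFixed);
//   RamAllocLock::Unlock();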
|
244 |
|
245 |
|
246 |
|
247 // |
|
248 // MmuLock |
|
249 // |
|
250 |
|
251 #ifdef _DEBUG |
|
252 TUint MmuLock::UnlockGuardNest =0; |
|
253 TUint MmuLock::UnlockGuardFail =0; |
|
254 #endif |
|
255 |
|
256 NFastMutex MmuLock::iLock; |
|
257 |
|
258 void MmuLock::Lock() |
|
259 { |
|
260 NKern::FMWait(&iLock); |
|
261 } |
|
262 |
|
263 void MmuLock::Unlock() |
|
264 { |
|
265 UnlockGuardCheck(); |
|
266 NKern::FMSignal(&iLock); |
|
267 } |
|
268 |
|
269 TBool MmuLock::Flash() |
|
270 { |
|
271 UnlockGuardCheck(); |
|
272 return NKern::FMFlash(&iLock); |
|
273 } |
|
274 |
|
275 TBool MmuLock::IsHeld() |
|
276 { |
|
277 NFastMutex& m = iLock; |
|
278 return m.HeldByCurrentThread(); |
|
279 } |
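// Illustrative sketch of the 'flash' idiom used by the long page-info
// walks later in this file: the lock is held across the whole walk but
// periodically released and re-acquired to bound the time other threads
// can be kept waiting:
//
//   TUint flash = 0;
//   MmuLock::Lock();
//   while(morePages)
//       {
//       MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
//       // ...update one SPageInfo...
//       }
//   MmuLock::Unlock();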
|
280 |
|
281 |
|
282 |
|
283 // |
|
284 // Initialisation |
|
285 // |
|
286 |
|
287 Mmu TheMmu; |
|
288 |
|
289 void Mmu::Init1Common() |
|
290 { |
|
291 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init1Common")); |
|
292 |
|
293 // Mmu data |
|
294 TUint pteType = PteType(ESupervisorReadWrite,true); |
|
295 iTempPteCached = BlankPte((TMemoryAttributes)(EMemoryAttributeNormalCached|EMemoryAttributeDefaultShareable),pteType); |
|
296 iTempPteUncached = BlankPte((TMemoryAttributes)(EMemoryAttributeNormalUncached|EMemoryAttributeDefaultShareable),pteType); |
|
297 iTempPteCacheMaintenance = BlankPte((TMemoryAttributes)(CacheMaintenance::TemporaryMapping()|EMemoryAttributeDefaultShareable),pteType); |
|
298 |
|
299 // other |
|
300 PP::MaxUserThreadStack=0x14000; // 80K - STDLIB asks for 64K for PosixServer!!!! |
|
301 PP::UserThreadStackGuard=0x2000; // 8K |
|
302 PP::MaxStackSpacePerProcess=0x200000; // 2Mb |
|
303 K::SupervisorThreadStackSize=0x1000; // 4K |
|
304 PP::SupervisorThreadStackGuard=0x1000; // 4K |
|
305 K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr; |
|
306 PP::RamDriveStartAddress=0; |
|
307 PP::RamDriveRange=0; |
|
308 PP::RamDriveMaxSize=0x20000000; // 512MB, probably will be reduced later |
|
309 K::MemModelAttributes=EMemModelTypeFlexible|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt| |
|
310 EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt| |
|
311 EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt; |
|
312 } |
|
313 |
|
314 |
|
315 #if 0 |
|
316 void Mmu::VerifyRam() |
|
317 { |
|
318 Kern::Printf("Mmu::VerifyRam() pass 1"); |
|
319 RamAllocLock::Lock(); |
|
320 |
|
321 TPhysAddr p = 0; |
|
322 do |
|
323 { |
|
324 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(p); |
|
325 if(pi) |
|
326 { |
|
327 Kern::Printf("%08x %d",p,pi->Type()); |
|
328 if(pi->Type()==SPageInfo::EUnused) |
|
329 { |
|
330 volatile TPhysAddr* b = (volatile TPhysAddr*)MapTemp(p,0); |
|
331 b[0] = p; |
|
332 b[1] = ~p; |
|
333 __NK_ASSERT_DEBUG(b[0]==p); |
|
334 __NK_ASSERT_DEBUG(b[1]==~p); |
|
335 UnmapTemp(); |
|
336 } |
|
337 } |
|
338 p += KPageSize; |
|
339 } |
|
340 while(p); |
|
341 |
|
342 TBool fail = false; |
|
343 Kern::Printf("Mmu::VerifyRam() pass 2"); |
|
344 do |
|
345 { |
|
346 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(p); |
|
347 if(pi) |
|
348 { |
|
349 if(pi->Type()==SPageInfo::EUnused) |
|
350 { |
|
351 volatile TPhysAddr* b = (volatile TPhysAddr*)MapTemp(p,0); |
|
352 if(b[0]!=p || b[1]!=~p) |
|
353 { |
|
354 fail = true; |
|
355 Kern::Printf("%08x FAILED %x %x",p,b[0],b[1]);
|
356 } |
|
357 UnmapTemp(); |
|
358 } |
|
359 } |
|
360 p += KPageSize; |
|
361 } |
|
362 while(p); |
|
363 |
|
364 __NK_ASSERT_DEBUG(!fail); |
|
365 RamAllocLock::Unlock(); |
|
366 } |
|
367 #endif |
|
368 |
|
369 |
|
370 void Mmu::Init2Common() |
|
371 { |
|
372 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2Common")); |
|
373 |
|
374 // create allocator... |
|
375 const SRamInfo& info = *(const SRamInfo*)TheSuperPage().iRamBootData; |
|
376 iRamPageAllocator = DRamAllocator::New(info, iRamZones, iRamZoneCallback); |
|
377 |
|
378 // initialise all pages in banks as unused... |
|
379 const SRamBank* bank = info.iBanks; |
|
380 while(bank->iSize) |
|
381 { |
|
382 TUint32 base = bank->iBase; |
|
383 TUint32 size = bank->iSize; |
|
384 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Found RAM bank 0x%08x size %d",base,size)); |
|
385 if(base+size<=base || ((base|size)&KPageMask)) |
|
386 Panic(EInvalidRamBankAtBoot); |
|
387 |
|
388 SPageInfo* pi = SPageInfo::FromPhysAddr(base); |
|
389 SPageInfo* piEnd = pi+(size>>KPageShift); |
|
390 while(pi<piEnd) |
|
391 (pi++)->SetUnused(); |
|
392 ++bank; |
|
393 } |
|
394 // step over the last bank to get to the reserved banks. |
|
395 ++bank; |
|
396 // mark any reserved regions as allocated... |
|
397 while(bank->iSize) |
|
398 { |
|
399 TUint32 base = bank->iBase; |
|
400 TUint32 size = bank->iSize; |
|
401 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Found reserved bank 0x%08x size %d",base,size)); |
|
402 if(base+size<=base || ((base|size)&KPageMask)) |
|
403 Panic(EInvalidReservedBankAtBoot); |
|
404 |
|
405 SPageInfo* pi = SPageInfo::FromPhysAddr(base); |
|
406 SPageInfo* piEnd = pi+(size>>KPageShift); |
|
407 while(pi<piEnd) |
|
408 (pi++)->SetPhysAlloc(); |
|
409 ++bank; |
|
410 } |
|
411 |
|
412 // Clear the initial (and only so far) page table info page so all unused

413 // page table infos will be marked as unused.
|
414 __ASSERT_COMPILE(SPageTableInfo::EUnused == 0); |
|
415 memclr((TAny*)KPageTableInfoBase, KPageSize); |
|
416 |
|
417 // look for page tables - assume first page table maps page tables |
|
418 TPte* pPte = (TPte*)KPageTableBase; |
|
419 TInt i; |
|
420 for(i=0; i<KChunkSize/KPageSize; ++i) |
|
421 { |
|
422 TPte pte = *pPte++; |
|
423 if(pte==KPteUnallocatedEntry) // after boot, page tables are contiguous |
|
424 break; |
|
425 TPhysAddr ptpgPhys = Mmu::PtePhysAddr(pte,i); |
|
426 __KTRACE_OPT(KBOOT,Kern::Printf("Page Table Group %08x -> Phys %08x", KPageTableBase+i*KPageSize, ptpgPhys)); |
|
427 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgPhys); |
|
428 __ASSERT_ALWAYS(pi, Panic(EInvalidPageTableAtBoot)); |
|
429 pi->SetFixed(i); // this also sets the SPageInfo::iOffset so that linear-to-physical works |
|
430 } |
|
431 |
|
432 // look for mapped pages |
|
433 TPde* pd = Mmu::PageDirectory(KKernelOsAsid); |
|
434 for(i=0; i<(1<<(32-KChunkShift)); ++i) |
|
435 { |
|
436 TPde pde = pd[i]; |
|
437 if(pde==KPdeUnallocatedEntry) |
|
438 continue; |
|
439 TPhysAddr pdePhys = Mmu::PdePhysAddr(pde); |
|
440 TPte* pt = 0; |
|
441 if(pdePhys!=KPhysAddrInvalid) |
|
442 { |
|
443 __KTRACE_OPT(KBOOT,Kern::Printf("Addr %08x -> Whole PDE Phys %08x", i<<KChunkShift, pdePhys)); |
|
444 } |
|
445 else |
|
446 { |
|
447 pt = Mmu::PageTableFromPde(pde); |
|
448 __KTRACE_OPT(KBOOT,Kern::Printf("Addr %08x -> page table %08x", i<<KChunkShift, pt)); |
|
449 __ASSERT_ALWAYS(pt,Panic(EInvalidPdeAtBoot)); // bad PDE |
|
450 } |
|
451 |
|
452 TInt j; |
|
453 TInt np = 0; |
|
454 for(j=0; j<KChunkSize/KPageSize; ++j) |
|
455 { |
|
456 TBool present = ETrue; // all pages present if whole PDE mapping |
|
457 TPte pte = 0; |
|
458 if(pt) |
|
459 { |
|
460 pte = pt[j]; |
|
461 present = pte!=KPteUnallocatedEntry; |
|
462 } |
|
463 if(present) |
|
464 { |
|
465 ++np; |
|
466 TPhysAddr pa = pt ? Mmu::PtePhysAddr(pte,j) : (pdePhys + (j<<KPageShift)); |
|
467 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa); |
|
468 __KTRACE_OPT(KBOOT,Kern::Printf("Addr: %08x PA=%08x", |
|
469 (i<<KChunkShift)+(j<<KPageShift), pa)); |
|
470 if(pi) // ignore non-RAM mappings |
|
471 { |
|
472 TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageFixed); |
|
473 // allow KErrAlreadyExists since it's possible that a page is doubly mapped |
|
474 __ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot)); |
|
475 if(pi->Type()==SPageInfo::EUnused) |
|
476 pi->SetFixed(); |
|
477 } |
|
478 } |
|
479 } |
|
480 __KTRACE_OPT(KBOOT,Kern::Printf("Addr: %08x #PTEs=%d",(i<<KChunkShift),np)); |
|
481 if(pt) |
|
482 { |
|
483 SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt); |
|
484 pti->Boot(np); |
|
485 } |
|
486 } |
|
487 |
|
488 TInt r = K::MutexCreate(iRamAllocatorMutex, KLitRamAlloc, NULL, EFalse, KMutexOrdRamAlloc); |
|
489 if(r!=KErrNone) |
|
490 Panic(ERamAllocMutexCreateFailed); |
|
491 iRamAllocLockCount = 0; |
|
492 iRamAllocInitialFreePages = FreeRamInPages(); |
|
493 |
|
494 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::DoInit2")); |
|
495 |
|
496 for(i=0; i<KNumTempMappingSlots; ++i) |
|
497 iTempMap[i].Alloc(1); |
|
498 |
|
499 iPhysMemSyncTemp.Alloc(1); |
|
500 r = K::MutexCreate(iPhysMemSyncMutex, KLitPhysMemSync, NULL, EFalse, KMutexOrdSyncPhysMem); |
|
501 if(r!=KErrNone) |
|
502 Panic(EPhysMemSyncMutexCreateFailed); |
|
503 // VerifyRam(); |
|
504 } |
|
505 |
|
506 |
|
507 void Mmu::Init2FinalCommon() |
|
508 { |
|
509 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2FinalCommon")); |
|
510 // hack, reduce free memory to <2GB... |
|
511 while(FreeRamInPages()>=0x80000000/KPageSize) |
|
512 { |
|
513 TPhysAddr dummyPage; |
|
514 TInt r = iRamPageAllocator->AllocRamPages(&dummyPage,1, EPageFixed); |
|
515 __NK_ASSERT_ALWAYS(r==KErrNone); |
|
516 } |
|
517 // hack, reduce total RAM to <2GB... |
|
518 if(TheSuperPage().iTotalRamSize<0) |
|
519 TheSuperPage().iTotalRamSize = 0x80000000-KPageSize; |
|
520 |
|
521 // Save current free RAM size - there can never be more free RAM than this |
|
522 TUint maxFreePages = FreeRamInPages(); |
|
523 K::MaxFreeRam = maxFreePages*KPageSize; |
|
524 if(maxFreePages < (TUint(PP::RamDriveMaxSize)>>KPageShift)) |
|
525 PP::RamDriveMaxSize = maxFreePages*KPageSize; |
|
526 |
|
527 // update this to stop assert triggering in RamAllocLock::Lock() |
|
528 iRamAllocInitialFreePages = maxFreePages; |
|
529 } |
|
530 |
|
531 |
|
532 void Mmu::Init3() |
|
533 { |
|
534 iDefrag = new Defrag; |
|
535 if (!iDefrag) |
|
536 Panic(EDefragAllocFailed); |
|
537 iDefrag->Init3(TheMmu.iRamPageAllocator); |
|
538 } |
|
539 |
|
540 // |
|
541 // Utils |
|
542 // |
|
543 |
|
544 void Mmu::Panic(TPanic aPanic) |
|
545 { |
|
546 Kern::Fault("MMU",aPanic); |
|
547 } |
|
548 |
|
549 |
|
550 TUint Mmu::FreeRamInPages() |
|
551 { |
|
552 return iRamPageAllocator->FreeRamInPages()+ThePager.NumberOfFreePages(); |
|
553 } |
|
554 |
|
555 |
|
556 TUint Mmu::TotalPhysicalRamPages() |
|
557 { |
|
558 return iRamPageAllocator->TotalPhysicalRamPages(); |
|
559 } |
|
560 |
|
561 |
|
562 const SRamZone* Mmu::RamZoneConfig(TRamZoneCallback& aCallback) const |
|
563 { |
|
564 aCallback = iRamZoneCallback; |
|
565 return iRamZones; |
|
566 } |
|
567 |
|
568 |
|
569 void Mmu::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback) |
|
570 { |
|
571 iRamZones = aZones; |
|
572 iRamZoneCallback = aCallback; |
|
573 } |
|
574 |
|
575 |
|
576 TInt Mmu::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask) |
|
577 { |
|
578 return iRamPageAllocator->ModifyZoneFlags(aId, aClearMask, aSetMask); |
|
579 } |
|
580 |
|
581 |
|
582 TInt Mmu::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData) |
|
583 { |
|
584 return iRamPageAllocator->GetZonePageCount(aId, aPageData); |
|
585 } |
|
586 |
|
587 |
|
588 TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign) |
|
589 { |
|
590 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?,%d)", aZoneIdCount, aBytes, aAlign));
|
591 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
592 |
|
593 TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, EPageFixed, aAlign); |
|
594 if(r!=KErrNone) |
|
595 iRamAllocFailed = ETrue; |
|
596 else |
|
597 { |
|
598 TUint pages = MM::RoundToPageCount(aBytes); |
|
599 AllocatedPhysicalRam(aPhysAddr, pages, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered); |
|
600 } |
|
601 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr)); |
|
602 return r; |
|
603 } |
|
604 |
|
605 |
|
606 TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList) |
|
607 { |
|
608 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?)", aZoneIdCount, aNumPages)); |
|
609 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
610 |
|
611 TInt r = iRamPageAllocator->ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, EPageFixed); |
|
612 if(r!=KErrNone) |
|
613 iRamAllocFailed = ETrue; |
|
614 else |
|
615 { |
|
616 PagesAllocated(aPageList, aNumPages, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered); |
|
617 |
|
618 // update page infos... |
|
619 TUint flash = 0; |
|
620 TPhysAddr* pageEnd = aPageList + aNumPages; |
|
621 MmuLock::Lock(); |
|
622 TPhysAddr* page = aPageList; |
|
623 while (page < pageEnd) |
|
624 { |
|
625 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); |
|
626 TPhysAddr pagePhys = *page++; |
|
627 __NK_ASSERT_DEBUG(pagePhys != KPhysAddrInvalid); |
|
628 SPageInfo::FromPhysAddr(pagePhys)->SetPhysAlloc(); |
|
629 } |
|
630 MmuLock::Unlock(); |
|
631 } |
|
632 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d",r)); |
|
633 return r; |
|
634 } |
|
635 |
|
636 |
|
637 TInt Mmu::RamHalFunction(TInt aFunction, TAny* a1, TAny* a2) |
|
638 { |
|
639 // This function should only be registered with hal and therefore can only |
|
640 // be invoked after the ram allocator has been created. |
|
641 __NK_ASSERT_DEBUG(iRamPageAllocator); |
|
642 return iRamPageAllocator->HalFunction(aFunction, a1, a2); |
|
643 } |
|
644 |
|
645 |
|
646 void Mmu::ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldPageType, TZonePageType aNewPageType) |
|
647 { |
|
648 iRamPageAllocator->ChangePageType(aPageInfo, aOldPageType, aNewPageType); |
|
649 } |
|
650 |
|
651 TInt Mmu::HandlePageFault(TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions, TAny* aExceptionInfo) |
|
652 { |
|
653 TRACE(("Mmu::HandlePageFault(0x%08x,0x%08x,%d)",aPc,aFaultAddress,aAccessPermissions)); |
|
654 |
|
655 DMemModelThread* thread = (DMemModelThread*)TheCurrentThread; |
|
656 // Get the os asid of the process taking the fault, no need to open a reference |
|
657 // as it is the current thread's process so can't be freed. |
|
658 TUint faultOsAsid = ((DMemModelProcess*)thread->iNThread.iAddressSpace)->OsAsid(); |
|
659 |
|
660 // check if any fast mutexes held... |
|
661 NFastMutex* fm = NKern::HeldFastMutex(); |
|
662 TPagingExcTrap* trap = thread->iPagingExcTrap; |
|
663 if(fm) |
|
664 { |
|
665 // check there is an XTRAP_PAGING in effect... |
|
666 if(!trap) |
|
667 { |
|
668 // oops, kill system... |
|
669 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("Fault with FM Held! addr=0x%08x (%O pc=%x)",aFaultAddress,thread,aPc)); |
|
670 Exc::Fault(aExceptionInfo); |
|
671 } |
|
672 |
|
673 // release the fast mutex... |
|
674 NKern::FMSignal(fm); |
|
675 } |
|
676 |
|
677 NKern::ThreadEnterCS(); |
|
678 |
|
679 // work out address space for aFaultAddress... |
|
680 TUint osAsid = faultOsAsid; |
|
681 TLinAddr addr = aFaultAddress; |
|
682 if(thread->iAliasLinAddr && TUint(addr - thread->iAliasLinAddr) < TUint(KPageSize)) |
|
683 { |
|
684 // Address in aliased memory... |
|
685 addr = (addr - thread->iAliasLinAddr) + thread->iAliasTarget; |
|
686 // Get the os asid of the process thread is aliasing, no need to open |
|
687 // a reference on it as one was already opened when the alias was created. |
|
688 osAsid = thread->iAliasProcess->OsAsid(); |
|
689 } |
|
690 else if(addr>=KGlobalMemoryBase) |
|
691 { |
|
692 // Address in global region, so look it up in kernel's address space... |
|
693 osAsid = KKernelOsAsid; |
|
694 } |
|
695 |
|
696 // NOTE, osAsid will remain valid for duration of this function because it is either |
|
697 // - The current thread's address space, which can't go away whilst the thread |
|
698 // is running. |
|
699 // - The address space of another thread which we are aliasing memory from, |
|
700 // and we would only do this if we have a reference on this other thread, |
|
701 // which has a reference on its process, which should own the address space!
|
702 |
|
703 #ifdef __BROADCAST_CACHE_MAINTENANCE__ |
|
704 if (thread->iAliasLinAddr) |
|
705 { |
|
706 // If an alias is in effect, the thread will be locked to the current CPU,

707 // but we need to be able to migrate between CPUs for cache maintenance. This
|
708 // must be dealt with by removing the alias and restoring it with a paging trap |
|
709 // handler. |
|
710 if(!trap) |
|
711 { |
|
712 // oops, kill system... |
|
713 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("Fault with thread locked to current CPU! addr=0x%08x (%O pc=%x)",aFaultAddress,thread,aPc)); |
|
714 Exc::Fault(aExceptionInfo); |
|
715 } |
|
716 thread->RemoveAlias(); |
|
717 } |
|
718 #endif |
|
719 |
|
720 // find mapping... |
|
721 TUint offsetInMapping; |
|
722 TUint mapInstanceCount; |
|
723 DMemoryMapping* mapping = MM::FindMappingInAddressSpace(osAsid, addr, 1, offsetInMapping, mapInstanceCount); |
|
724 // TRACE(("%O mapping=0x%08x",TheCurrentThread,mapping)); |
|
725 TInt r = KErrNotFound; |
|
726 |
|
727 if(mapping) |
|
728 { |
|
729 // Pinning mappings should not be found from within an address space. |
|
730 __NK_ASSERT_DEBUG(!mapping->IsPinned()); |
|
731 MmuLock::Lock(); |
|
732 |
|
733 // check if we need to process page fault... |
|
734 if(!Mmu::CheckPteTypePermissions(mapping->PteType(),aAccessPermissions) || |
|
735 mapInstanceCount != mapping->MapInstanceCount()) |
|
736 { |
|
737 // Invalid access to the page. |
|
738 MmuLock::Unlock(); |
|
739 r = KErrAbort; |
|
740 } |
|
741 else |
|
742 { |
|
743 // we do need to handle the fault, so decide whether this is a demand paging or page moving fault
|
744 DMemoryObject* memory = mapping->Memory(); |
|
745 if(!memory) |
|
746 MmuLock::Unlock(); |
|
747 else |
|
748 { |
|
749 TUint faultIndex = (offsetInMapping >> KPageShift) + mapping->iStartIndex; |
|
750 memory->Open(); |
|
751 |
|
752 // This is safe as we have the instance count so can detect the mapping |
|
753 // being reused and we have a reference to the memory object so it can't |
|
754 // be deleted. |
|
755 MmuLock::Unlock(); |
|
756 |
|
757 if(memory->IsDemandPaged()) |
|
758 { |
|
759 // Let the pager handle the fault... |
|
760 r = ThePager.HandlePageFault( aPc, aFaultAddress, faultOsAsid, faultIndex, |
|
761 aAccessPermissions, memory, mapping, mapInstanceCount, |
|
762 thread, aExceptionInfo); |
|
763 } |
|
764 else |
|
765 {// The page could be being moved so verify that with its manager. |
|
766 DMemoryManager* manager = memory->iManager; |
|
767 r = manager->HandleFault(memory, faultIndex, mapping, mapInstanceCount, aAccessPermissions); |
|
768 } |
|
769 if (r == KErrNone) |
|
770 {// alias PDE needs updating because page tables have changed... |
|
771 thread->RefreshAlias(); |
|
772 } |
|
773 memory->Close(); |
|
774 } |
|
775 } |
|
776 mapping->Close(); |
|
777 } |
|
778 |
|
779 if (trap) |
|
780 { |
|
781 // restore address space (because the trap will bypass any code |
|
782 // which would have done this.)... |
|
783 DMemModelThread::RestoreAddressSpace(); |
|
784 } |
|
785 |
|
786 NKern::ThreadLeaveCS(); // thread will die now if CheckRealtimeThreadFault caused a panic |
|
787 |
|
788 // deal with XTRAP_PAGING... |
|
789 if(trap) |
|
790 { |
|
791 // re-acquire any fast mutex which was held before the page fault... |
|
792 if(fm) |
|
793 NKern::FMWait(fm); |
|
794 if (r == KErrNone) |
|
795 { |
|
796 trap->Exception(1); // return from exception trap with result '1' (value>0) |
|
797 // code doesn't continue beyond this point. |
|
798 __NK_ASSERT_DEBUG(0); |
|
799 } |
|
800 } |
|
801 |
|
802 return r; |
|
803 } |
|
804 |
|
805 |
|
806 // |
|
807 // Memory allocation |
|
808 // |
|
809 |
|
810 TInt Mmu::AllocRam( TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags, TZonePageType aZonePageType, |
|
811 TUint aBlockZoneId, TBool aBlockRest) |
|
812 { |
|
813 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam(?,%d,%x)",aCount,aFlags)); |
|
814 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
815 #ifdef _DEBUG |
|
816 if(K::CheckForSimulatedAllocFail()) |
|
817 { |
|
818 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns simulated OOM %d",KErrNoMemory)); |
|
819 return KErrNoMemory; |
|
820 } |
|
821 #endif |
|
822 TInt missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest); |
|
823 if(missing && !(aFlags&EAllocNoPagerReclaim) && ThePager.GetFreePages(missing)) |
|
824 missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest); |
|
825 TInt r = missing ? KErrNoMemory : KErrNone; |
|
826 if(r!=KErrNone) |
|
827 iRamAllocFailed = ETrue; |
|
828 else |
|
829 PagesAllocated(aPages,aCount,aFlags); |
|
830 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns %d",r)); |
|
831 return r; |
|
832 } |
|
833 |
|
834 |
|
835 void Mmu::FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType) |
|
836 { |
|
837 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeRam(?,%d)",aCount)); |
|
838 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
839 |
|
840 // update page infos... |
|
841 TPhysAddr* pages = aPages; |
|
842 TPhysAddr* pagesEnd = pages+aCount; |
|
843 TPhysAddr* pagesOut = aPages; |
|
844 MmuLock::Lock(); |
|
845 TUint flash = 0; |
|
846 while(pages<pagesEnd) |
|
847 { |
|
848 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); |
|
849 TPhysAddr pagePhys = *pages++; |
|
850 __NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid); |
|
851 SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys); |
|
852 PageFreed(pi); |
|
853 |
|
854 // If this is an old page of a page being moved that was previously pinned |
|
855 // then make sure it is freed as discardable otherwise despite DPager::DonatePages() |
|
856 // having marked it as discardable it would be freed as movable. |
|
857 __NK_ASSERT_DEBUG(pi->PagedState() != SPageInfo::EPagedPinnedMoved || aCount == 1); |
|
858 if (pi->PagedState() == SPageInfo::EPagedPinnedMoved) |
|
859 aZonePageType = EPageDiscard; |
|
860 |
|
861 if(ThePager.PageFreed(pi)==KErrNone) |
|
862 --aCount; // pager has dealt with this page, so one less for us |
|
863 else |
|
864 { |
|
865 // All paged pages should have been dealt with by the pager above. |
|
866 __NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged); |
|
867 *pagesOut++ = pagePhys; // store page address for freeing later |
|
868 } |
|
869 } |
|
870 MmuLock::Unlock(); |
|
871 |
|
872 iRamPageAllocator->FreeRamPages(aPages, aCount, aZonePageType); |
|
873 } |
|
874 |
|
875 |
|
876 TInt Mmu::AllocContiguousRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags) |
|
877 { |
|
878 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam(?,0x%x,%d,%x)",aCount,aAlign,aFlags)); |
|
879 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
880 #ifdef _DEBUG |
|
881 if(K::CheckForSimulatedAllocFail()) |
|
882 { |
|
883 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns simulated OOM %d",KErrNoMemory)); |
|
884 return KErrNoMemory; |
|
885 } |
|
886 // Only the pager sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram.
|
887 __NK_ASSERT_DEBUG(!(aFlags&EAllocNoPagerReclaim)); |
|
888 #endif |
|
889 TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift); |
|
890 if(r==KErrNoMemory && aCount > KMaxFreeableContiguousPages) |
|
891 { |
|
892 // flush paging cache and retry... |
|
893 ThePager.FlushAll(); |
|
894 r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift); |
|
895 } |
|
896 if(r!=KErrNone) |
|
897 iRamAllocFailed = ETrue; |
|
898 else |
|
899 PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags); |
|
900 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
|
901 return r; |
|
902 } |
|
903 |
|
904 |
|
905 void Mmu::FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount) |
|
906 { |
|
907 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeContiguousRam(0x%08x,0x%x)",aPhysAddr,aCount)); |
|
908 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
909 __NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0); |
|
910 |
|
911 TUint pageCount = aCount; |
|
912 |
|
913 // update page infos... |
|
914 SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr); |
|
915 SPageInfo* piEnd = pi+pageCount; |
|
916 TUint flash = 0; |
|
917 MmuLock::Lock(); |
|
918 while(pi<piEnd) |
|
919 { |
|
920 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo); |
|
921 PageFreed(pi++); |
|
922 } |
|
923 MmuLock::Unlock(); |
|
924 |
|
925 // free pages... |
|
926 while(pageCount) |
|
927 { |
|
928 iRamPageAllocator->FreeRamPage(aPhysAddr, EPageFixed); |
|
929 aPhysAddr += KPageSize; |
|
930 --pageCount; |
|
931 } |
|
932 } |
|
933 |
|
934 |
|
935 TInt Mmu::AllocPhysicalRam(TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags) |
|
936 { |
|
937 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,%d,%x)",aCount,aFlags)); |
|
938 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
939 // Allocate fixed pages as physically allocated pages aren't movable or discardable. |
|
940 TInt r = AllocRam(aPages, aCount, aFlags, EPageFixed); |
|
941 if (r!=KErrNone) |
|
942 return r; |
|
943 |
|
944 // update page infos... |
|
945 TPhysAddr* pages = aPages; |
|
946 TPhysAddr* pagesEnd = pages+aCount; |
|
947 MmuLock::Lock(); |
|
948 TUint flash = 0; |
|
949 while(pages<pagesEnd) |
|
950 { |
|
951 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); |
|
952 TPhysAddr pagePhys = *pages++; |
|
953 __NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid); |
|
954 SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys); |
|
955 pi->SetPhysAlloc(); |
|
956 } |
|
957 MmuLock::Unlock(); |
|
958 |
|
959 return KErrNone; |
|
960 } |
|
961 |
|
962 |
|
963 void Mmu::FreePhysicalRam(TPhysAddr* aPages, TUint aCount) |
|
964 { |
|
965 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(?,%d)",aCount)); |
|
966 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
967 |
|
968 // update page infos... |
|
969 TPhysAddr* pages = aPages; |
|
970 TPhysAddr* pagesEnd = pages+aCount; |
|
971 MmuLock::Lock(); |
|
972 TUint flash = 0; |
|
973 while(pages<pagesEnd) |
|
974 { |
|
975 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); |
|
976 TPhysAddr pagePhys = *pages++; |
|
977 __NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid); |
|
978 SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys); |
|
979 __ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam)); |
|
980 __ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam)); |
|
981 pi->SetUnused(); |
|
982 } |
|
983 MmuLock::Unlock(); |
|
984 |
|
985 iRamPageAllocator->FreeRamPages(aPages,aCount, EPageFixed); |
|
986 } |
|
987 |
|
988 |
|
989 TInt Mmu::AllocPhysicalRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags) |
|
990 { |
|
991 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,0x%x,%d,%x)",aCount,aAlign,aFlags));
|
992 TInt r = AllocContiguousRam(aPhysAddr,aCount,aAlign,aFlags); |
|
993 if (r!=KErrNone) |
|
994 return r; |
|
995 |
|
996 // update page infos... |
|
997 SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr); |
|
998 SPageInfo* piEnd = pi+aCount; |
|
999 TUint flash = 0; |
|
1000 MmuLock::Lock(); |
|
1001 while(pi<piEnd) |
|
1002 { |
|
1003 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo); |
|
1004 pi->SetPhysAlloc(); |
|
1005 ++pi; |
|
1006 } |
|
1007 MmuLock::Unlock(); |
|
1008 |
|
1009 return KErrNone; |
|
1010 } |
|
1011 |
|
1012 |
|
1013 void Mmu::FreePhysicalRam(TPhysAddr aPhysAddr, TUint aCount) |
|
1014 { |
|
1015 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(0x%08x,0x%x)",aPhysAddr,aCount)); |
|
1016 |
|
1017 // update page infos... |
|
1018 SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr); |
|
1019 SPageInfo* piEnd = pi+aCount; |
|
1020 TUint flash = 0; |
|
1021 MmuLock::Lock(); |
|
1022 while(pi<piEnd) |
|
1023 { |
|
1024 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo); |
|
1025 __ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam)); |
|
1026 __ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam)); |
|
1027 pi->SetUnused(); |
|
1028 ++pi; |
|
1029 } |
|
1030 MmuLock::Unlock(); |
|
1031 |
|
1032 iRamPageAllocator->FreePhysicalRam(aPhysAddr, aCount << KPageShift); |
|
1033 } |
|
1034 |
|
1035 |
|
1036 TInt Mmu::ClaimPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags) |
|
1037 { |
|
1038 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(0x%08x,0x%x,0x%08x)",aPhysAddr,aCount,aFlags)); |
|
1039 aPhysAddr &= ~KPageMask; |
|
1040 TInt r = iRamPageAllocator->ClaimPhysicalRam(aPhysAddr,(aCount << KPageShift)); |
|
1041 if(r!=KErrNone) |
|
1042 return r; |
|
1043 |
|
1044 PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags); |
|
1045 |
|
1046 // update page infos... |
|
1047 SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr); |
|
1048 SPageInfo* piEnd = pi+aCount; |
|
1049 TUint flash = 0; |
|
1050 MmuLock::Lock(); |
|
1051 while(pi<piEnd) |
|
1052 { |
|
1053 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo); |
|
1054 pi->SetPhysAlloc(); |
|
1055 ++pi; |
|
1056 } |
|
1057 MmuLock::Unlock(); |
|
1058 |
|
1059 return KErrNone; |
|
1060 } |
|
1061 |
|
1062 |
|
1063 void Mmu::AllocatedPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags) |
|
1064 { |
|
1065 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocatedPhysicalRam(0x%08x,0x%x,%x)",aPhysAddr,aCount,aFlags));
|
1066 |
|
1067 PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags); |
|
1068 |
|
1069 // update page infos... |
|
1070 SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr); |
|
1071 SPageInfo* piEnd = pi+aCount; |
|
1072 TUint flash = 0; |
|
1073 MmuLock::Lock(); |
|
1074 while(pi<piEnd) |
|
1075 { |
|
1076 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo); |
|
1077 pi->SetPhysAlloc(); |
|
1078 ++pi; |
|
1079 } |
|
1080 MmuLock::Unlock(); |
|
1081 } |
|
1082 |
|
1083 |
|
1084 // |
|
1085 // Misc |
|
1086 // |
|
1087 |
|
1088 #ifdef _DEBUG |
|
1089 /** |
|
1090 Perform a page table walk to return the physical address of |
|
1091 the memory mapped at virtual address \a aLinAddr in the |
|
1092 address space \a aOsAsid. |
|
1093 |
|
1094 If the page table used was not one allocated by the kernel |
|
1095 then the results are unpredictable and may cause a system fault. |
|
1096 |
|
1097 @pre #MmuLock held. |
|
1098 */ |
|
1099 TPhysAddr Mmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid) |
|
1100 { |
|
1101 __NK_ASSERT_DEBUG(MmuLock::IsHeld() || K::Initialising); |
|
1102 return UncheckedLinearToPhysical(aLinAddr,aOsAsid); |
|
1103 } |
|
1104 #endif |
|
1105 |
|
1106 |
|
1107 /** |
|
1108 Next virtual address available for allocation by TTempMapping. |
|
1109 This is initialised to #KTempAddr and addresses may be allocated |
|
1110 until they reach #KTempAddrEnd. |
|
1111 */ |
|
1112 TLinAddr Mmu::TTempMapping::iNextLinAddr = KTempAddr; |
|
1113 |
|
1114 |
|
1115 /** |
|
1116 Allocate virtual address space required to map a given number of memory pages. |
|
1117 |
|
1118 The actual amount of virtual address space allocated needs to accommodate \a aNumPages
|
1119 number of pages of any colour. For example: if \a aNumPages == 4 and #KPageColourCount == 4, |
|
1120 then at least 7 pages are required. |
|
1121 |
|
1122 @param aNumPages Maximum number of pages that can be mapped into this temporary mapping. |
|
1123 |
|
1124 @pre Called in single threaded context (boot) only.
|
1125 |
|
1126 @pre #iNextLinAddr points to virtual page with zero colour. |
|
1127 @post #iNextLinAddr points to virtual page with zero colour. |
|
1128 */ |
|
1129 void Mmu::TTempMapping::Alloc(TUint aNumPages) |
|
1130 { |
|
1131 __NK_ASSERT_DEBUG(aNumPages<=(KTempAddrEnd-KTempAddr)/KPageSize); |
|
1132 |
|
1133 // This runs during the boot only (single threaded context) so the access to iNextLinAddr is not guarded by any mutex. |
|
1134 TLinAddr tempAddr = iNextLinAddr; |
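	// Reserve enough pages for aNumPages starting at any colour, rounded up
	// to a whole number of colours so iNextLinAddr stays colour-0 aligned.
	// E.g. with KPageColourMask==3 and aNumPages==4: (3+4+3)&~3 == 8 pages.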
|
1135 TUint numPages = (KPageColourMask+aNumPages+KPageColourMask)&~KPageColourMask; |
|
1136 iNextLinAddr = tempAddr+numPages*KPageSize; |
|
1137 |
|
1138 __NK_ASSERT_ALWAYS(iNextLinAddr<=KTempAddrEnd); |
|
1139 |
|
1140 __NK_ASSERT_DEBUG(iSize==0); |
|
1141 iLinAddr = tempAddr; |
|
1142 MmuLock::Lock(); |
|
1143 iPtePtr = Mmu::PtePtrFromLinAddr(tempAddr,KKernelOsAsid); |
|
1144 __NK_ASSERT_DEBUG(iPtePtr); |
|
1145 MmuLock::Unlock(); |
|
1146 iBlankPte = TheMmu.iTempPteCached; |
|
1147 iSize = aNumPages; |
|
1148 iCount = 0; |
|
1149 |
|
1150 TRACEB(("Mmu::TTempMapping::Alloc(%d) iLinAddr=0x%08x, iPtePtr=0x%08x",aNumPages,iLinAddr,iPtePtr)); |
|
1151 } |
|
1152 |
|
1153 |
|
1154 /** |
|
1155 Map a single physical page into this temporary mapping. |
|
1156 |
|
1157 Supervisor read/write access and EMemoryAttributeStandard memory attributes apply. |
|
1158 |
|
1159 @param aPage The physical page to map. |
|
1160 @param aColour The required colour for the mapping. |
|
1161 |
|
1162 @return The linear address at which the page is mapped. |
|
1163 */ |
|
1164 TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour) |
|
1165 { |
|
1166 __NK_ASSERT_DEBUG(iSize>=1); |
|
1167 __NK_ASSERT_DEBUG(iCount==0); |
|
1168 |
|
1169 TUint colour = aColour&KPageColourMask; |
|
1170 TLinAddr addr = iLinAddr+(colour<<KPageShift); |
|
1171 TPte* pPte = iPtePtr+colour; |
|
1172 iColour = colour; |
|
1173 |
|
1174 __ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse)); |
|
1175 *pPte = (aPage&~KPageMask) | iBlankPte; |
|
1176 CacheMaintenance::SinglePteUpdated((TLinAddr)pPte); |
|
1177 InvalidateTLBForPage(addr|KKernelOsAsid); |
|
1178 |
|
1179 iCount = 1; |
|
1180 return addr; |
|
1181 } |
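// Illustrative sketch (names are hypothetical): a TTempMapping slot, such
// as those pre-allocated in Mmu::Init2Common(), is driven as a map/access/
// unmap sequence; Mmu::MapTemp()/UnmapTemp() (see VerifyRam() above) wrap
// this pattern:
//
//   TLinAddr va = tempMap.Map(pagePhys, colour); // supervisor R/W, cached
//   // ...access the physical page through 'va'...
//   tempMap.Unmap();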
|
1182 |
|
1183 /** |
|
1184 Map a single physical page into this temporary mapping using the given page table entry (PTE) value. |
|
1185 |
|
1186 @param aPage The physical page to map. |
|
1187 @param aColour The required colour for the mapping. |
|
1188 @param aBlankPte The PTE value to use for mapping the page, |
|
1189 with the physical address component equal to zero. |
|
1190 |
|
1191 @return The linear address at which the page is mapped. |
|
1192 */ |
|
1193 TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour, TPte aBlankPte) |
|
1194 { |
|
1195 __NK_ASSERT_DEBUG(iSize>=1); |
|
1196 __NK_ASSERT_DEBUG(iCount==0); |
|
1197 |
|
1198 TUint colour = aColour&KPageColourMask; |
|
1199 TLinAddr addr = iLinAddr+(colour<<KPageShift); |
|
1200 TPte* pPte = iPtePtr+colour; |
|
1201 iColour = colour; |
|
1202 |
|
1203 __ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse)); |
|
1204 *pPte = (aPage&~KPageMask) | aBlankPte; |
|
1205 CacheMaintenance::SinglePteUpdated((TLinAddr)pPte); |
|
1206 InvalidateTLBForPage(addr|KKernelOsAsid); |
|
1207 |
|
1208 iCount = 1; |
|
1209 return addr; |
|
1210 } |
|
1211 |
|
1212 |
|
1213 /** |
|
1214 Map a number of physical pages into this temporary mapping. |
|
1215 |
|
1216 Supervisor read/write access and EMemoryAttributeStandard memory attributes apply. |
|
1217 |
|
1218 @param aPages The array of physical pages to map. |
|
1219 @param aCount The number of pages to map. |
|
1220 @param aColour The required colour for the first page. |
|
1221 Consecutive pages will be coloured accordingly. |
|
1222 |
|
1223 @return The linear address at which the first page is mapped. |
|
1224 */ |
|
1225 TLinAddr Mmu::TTempMapping::Map(TPhysAddr* aPages, TUint aCount, TUint aColour) |
|
1226 { |
|
1227 __NK_ASSERT_DEBUG(iSize>=aCount); |
|
1228 __NK_ASSERT_DEBUG(iCount==0); |
|
1229 |
|
1230 TUint colour = aColour&KPageColourMask; |
|
1231 TLinAddr addr = iLinAddr+(colour<<KPageShift); |
|
1232 TPte* pPte = iPtePtr+colour; |
|
1233 iColour = colour; |
|
1234 |
|
1235 for(TUint i=0; i<aCount; ++i) |
|
1236 { |
|
1237 __ASSERT_DEBUG(pPte[i]==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse)); |
|
1238 pPte[i] = (aPages[i]&~KPageMask) | iBlankPte; |
|
1239 CacheMaintenance::SinglePteUpdated((TLinAddr)&pPte[i]); |
|
1240 InvalidateTLBForPage((addr+i*KPageSize)|KKernelOsAsid); |
|
1241 } |
|
1242 |
|
1243 iCount = aCount; |
|
1244 return addr; |
|
1245 } |
|
1246 |
|
1247 |
|
1248 /** |
|
1249 Unmap all pages from this temporary mapping. |
|
1250 |
|
1251 @param aIMBRequired True if an IMB barrier is required prior to unmapping.
|
1252 */ |
|
1253 void Mmu::TTempMapping::Unmap(TBool aIMBRequired) |
|
1254 { |
|
1255 __NK_ASSERT_DEBUG(iSize>=1); |
|
1256 if(aIMBRequired) |
|
1257 CacheMaintenance::CodeChanged(iLinAddr+iColour*KPageSize,iCount*KPageSize); |
|
1258 Unmap(); |
|
1259 } |
|
1260 |
|
1261 |
|
1262 /** |
|
1263 Unmap all pages from this temporary mapping. |
|
1264 */ |
|
1265 void Mmu::TTempMapping::Unmap() |
|
1266 { |
|
1267 __NK_ASSERT_DEBUG(iSize>=1); |
|
1268 |
|
1269 TUint colour = iColour; |
|
1270 TLinAddr addr = iLinAddr+(colour<<KPageShift); |
|
1271 TPte* pPte = iPtePtr+colour; |
|
1272 TUint count = iCount; |
|
1273 |
|
1274 while(count) |
|
1275 { |
|
1276 *pPte = KPteUnallocatedEntry; |
|
1277 CacheMaintenance::SinglePteUpdated((TLinAddr)pPte); |
|
1278 InvalidateTLBForPage(addr|KKernelOsAsid); |
|
1279 addr += KPageSize; |
|
1280 ++pPte; |
|
1281 --count; |
|
1282 } |
|
1283 |
|
1284 iCount = 0; |
|
1285 } |
|
1286 |
|
1287 |
|
1288 /** |
|
1289 Remove any thread IPC aliases which use the specified page table. |
|
1290 This is used by the page table allocator when a page table is freed. |
|
1291 |
|
1292 @pre #PageTablesLockIsHeld |
|
1293 */ |
|
1294 void Mmu::RemoveAliasesForPageTable(TPhysAddr aPageTable) |
|
1295 { |
|
1296 __NK_ASSERT_DEBUG(PageTablesLockIsHeld()); |
|
1297 |
|
1298 MmuLock::Lock(); |
|
1299 |
|
1300 SDblQue checkedList; |
|
1301 |
|
1302 TUint ptId = aPageTable>>KPageTableShift; |
|
1303 while(!iAliasList.IsEmpty()) |
|
1304 { |
|
1305 SDblQueLink* next = iAliasList.First()->Deque(); |
|
1306 checkedList.Add(next); |
|
1307 DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink)); |
|
1308 if((thread->iAliasPde>>KPageTableShift)==ptId) |
|
1309 { |
|
1310 // the page table is being aliased by the thread, so remove it... |
|
1311 TRACE2(("Thread %O RemoveAliasesForPageTable", thread));
|
1312 thread->iAliasPde = KPdeUnallocatedEntry; |
|
1313 #ifdef __SMP__ // we need to also unmap the page table in case thread is running on another core... |
|
1314 // need Data Memory Barrier (DMB) here to make sure iAliasPde change is |
|
1315 // seen before we set the PDE entry, otherwise 'thread' may read old value |
|
1316 // and put it back |
|
1317 __e32_memory_barrier(); |
|
1318 *thread->iAliasPdePtr = KPdeUnallocatedEntry; |
|
1319 SinglePdeUpdated(thread->iAliasPdePtr); |
|
1320 __NK_ASSERT_DEBUG((thread->iAliasLinAddr&KPageMask)==0); |
|
1321 // Invalidate the tlb for the page using os asid of the process that created the alias |
|
1322 // this is safe as the os asid will be valid as thread must be running otherwise the alias |
|
1323 // would have been removed. |
|
1324 InvalidateTLBForPage(thread->iAliasLinAddr | ((DMemModelProcess*)thread->iOwningProcess)->OsAsid()); |
|
1325 // note, race condition with 'thread' updating its iAliasLinAddr is |
|
1326 // not a problem because 'thread' will not then be accessing the aliased
|
1327 // region and will take care of invalidating the TLB. |
|
1328 // FIXME: There is still a race here. If the thread owning the alias reads the |
|
1329 // PDE before we clear thread->iAliasPde and writes it after we clear |
|
1330 // *thread->iAliasPdePtr the alias still ends up restored when it shouldn't be. |
|
1331 #endif |
|
1332 } |
|
1333 MmuLock::Flash(); |
|
1334 } |
|
1335 |
|
1336 // copy checkedList back to iAliasList |
|
1337 iAliasList.MoveFrom(&checkedList); |
|
1338 |
|
1339 MmuLock::Unlock(); |
|
1340 } |
|
1341 |
|
1342 |
|
1343 void DMemModelThread::RefreshAlias() |
|
1344 { |
|
1345 if(iAliasLinAddr) |
|
1346 { |
|
1347 TRACE2(("Thread %O RefreshAlias", this)); |
|
1348 // Get the os asid, this is the current thread so no need to open a reference. |
|
1349 TUint thisAsid = ((DMemModelProcess*)iOwningProcess)->OsAsid(); |
|
1350 MmuLock::Lock(); |
|
1351 TInt osAsid = iAliasProcess->OsAsid(); |
|
1352 TPde pde = *Mmu::PageDirectoryEntry(osAsid,iAliasTarget); |
|
1353 iAliasPde = pde; |
|
1354 *iAliasPdePtr = pde; |
|
1355 SinglePdeUpdated(iAliasPdePtr); |
|
1356 InvalidateTLBForPage(iAliasLinAddr|thisAsid); |
|
1357 MmuLock::Unlock(); |
|
1358 } |
|
1359 } |
|
1360 |
|
1361 |
|
1362 |
|
1363 // |
|
1364 // Mapping/unmapping functions |
|
1365 // |
|
1366 |
|
1367 |
|
1368 /** |
|
1369 Modify page table entries (PTEs) so they map the given memory pages. |
|
1370 Entries are only updated if the current state of the corresponding page |
|
1371 is RPageArray::ECommitted. |
|
1372 |
|
1373 @param aPtePtr Pointer into a page table for the PTE of the first page. |
|
1374 @param aCount The number of pages to modify. |
|
1375 @param aPages Pointer to the entry for the first page in a memory object's #RPageArray. |
|
1376 Each entry contains the physical address of a page together with its |
|
1377 current state (RPageArray::TState). |
|
1378 @param aBlankPte The value to use for each PTE, with the physical address component equal |
|
1379 to zero. |
|
1380 |
|
1381 @return False, if the page table no longer maps any entries and may be freed. |
|
1382 True otherwise, to indicate that the page table is still needed. |
|
1383 |
|
1384 @pre #MmuLock held. |
|
1385 @post #MmuLock held and has not been released by this function. |
|
1386 */ |
|
1387 TBool Mmu::MapPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte) |
|
1388 { |
|
1389 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
1390 __NK_ASSERT_DEBUG(aCount); |
|
1391 __NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry); |
|
1392 |
|
1393 TUint count = 0; |
|
1394 if(aCount==1) |
|
1395 { |
|
1396 // get page to map... |
|
1397 TPhysAddr pagePhys = *aPages; |
|
1398 TPte pte = *aPtePtr; |
|
1399 if(!RPageArray::TargetStateIsCommitted(pagePhys)) |
|
1400 goto done; // page no longer needs mapping |
|
1401 |
|
1402 // clear type flags... |
|
1403 pagePhys &= ~KPageMask; |
|
1404 |
|
1405 // check nobody has already mapped the page... |
|
1406 if(pte!=KPteUnallocatedEntry) |
|
1407 { |
|
1408 // already mapped... |
|
1409 #ifdef _DEBUG |
|
1410 if((pte^pagePhys)>=TPte(KPageSize)) |
|
1411 { |
|
1412 // but different! |
|
1413 Kern::Printf("Mmu::MapPages already mapped %x->%x",pagePhys,pte); |
|
1414 __NK_ASSERT_DEBUG(0); |
|
1415 } |
|
1416 #endif |
|
1417 return true; // return true to keep page table (it already had at least one page mapped)
|
1418 } |
|
1419 |
|
1420 // map page... |
|
1421 pte = pagePhys|aBlankPte; |
|
1422 TRACE2(("!PTE %x=%x",aPtePtr,pte)); |
|
1423 *aPtePtr = pte; |
|
1424 count = 1; |
|
1425 |
|
1426 // clean cache... |
|
1427 CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr); |
|
1428 } |
|
1429 else |
|
1430 { |
|
1431 // check we are only updating a single page table... |
|
1432 __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0); |
|
1433 |
|
1434 // map pages... |
|
1435 TPte* pPte = aPtePtr; |
|
1436 TPte* pPteEnd = aPtePtr+aCount; |
|
1437 do |
|
1438 { |
|
1439 // map page... |
|
1440 TPhysAddr pagePhys = *aPages++; |
|
1441 TPte pte = *pPte++; |
|
1442 if(RPageArray::TargetStateIsCommitted(pagePhys)) |
|
1443 { |
|
1444 // clear type flags... |
|
1445 pagePhys &= ~KPageMask; |
|
1446 |
|
1447 // page not being freed, so try and map it... |
|
1448 if(pte!=KPteUnallocatedEntry) |
|
1449 { |
|
1450 // already mapped... |
|
1451 #ifdef _DEBUG |
|
1452 if((pte^pagePhys)>=TPte(KPageSize)) |
|
1453 { |
|
1454 // but different! |
|
1455 Kern::Printf("Mmu::MapPages already mapped %x->%x",pagePhys,pte); |
|
1456 __NK_ASSERT_DEBUG(0); |
|
1457 } |
|
1458 #endif |
|
1459 } |
|
1460 else |
|
1461 { |
|
1462 // map page... |
|
1463 pte = pagePhys|aBlankPte; |
|
1464 TRACE2(("!PTE %x=%x",pPte-1,pte)); |
|
1465 pPte[-1] = pte; |
|
1466 ++count; |
|
1467 } |
|
1468 } |
|
1469 } |
|
1470 while(pPte!=pPteEnd); |
|
1471 |
|
1472 // clean cache... |
|
1473 CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr); |
|
1474 } |
|
1475 |
|
1476 done: |
|
1477 // update page counts... |
|
1478 SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr); |
|
1479 count = pti->IncPageCount(count); |
|
1480 TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,pti->PageCount())); |
|
1481 __NK_ASSERT_DEBUG(pti->CheckPageCount()); |
|
1482 |
|
1483 // see if page table needs freeing... |
|
1484 TUint keepPt = count | pti->PermanenceCount(); |
|
1485 |
|
1486 __NK_ASSERT_DEBUG(!pti->IsDemandPaged()); // check not demand paged page table |
|
1487 |
|
1488 return keepPt; |
|
1489 } |
|
1490 |
|
1491 |
|
1492 /** |
|
1493 Modify page table entries (PTEs) so they map a new page. |
|
1494 Entries are only updated if the current state of the corresponding page |
|
1495 is RPageArray::ECommitted or RPageArray::EMoving. |
|
1496 |
|
1497 @param aPtePtr Pointer into a page table for the PTE of the page. |
|
1498 @param aPage Pointer to the entry for the page in a memory object's #RPageArray. |
|
1499 The entry contains the physical address of a page together with its |
|
1500 current state (RPageArray::TState). |
|
1501 @param aBlankPte The value to use for each PTE, with the physical address component equal |
|
1502 to zero. |
|
1503 |
|
1504 @pre #MmuLock held. |
|
1505 @post #MmuLock held and has not been released by this function. |
|
1506 */ |
|
1507 void Mmu::RemapPage(TPte* const aPtePtr, TPhysAddr& aPage, TPte aBlankPte) |
|
1508 { |
|
1509 __NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
1510 __NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry); |
|
1511 |
|
1512 // get page to remap... |
|
1513 TPhysAddr pagePhys = aPage; |
|
1514 |
|
1515 // Only remap the page if it is committed or it is being moved and |
|
1516 // no other operation has been performed on the page. |
|
1517 if(!RPageArray::TargetStateIsCommitted(pagePhys)) |
|
1518 return; // page no longer needs mapping |
|
1519 |
|
1520 // Only remap the page if it is currently mapped, i.e. doesn't have an unallocated pte. |
|
1521 // This will only be true if a new mapping is being added but it hasn't yet updated |
|
1522 // all the ptes for the pages that it maps. |
|
1523 TPte pte = *aPtePtr; |
|
1524 if (pte == KPteUnallocatedEntry) |
|
1525 return; |
|
1526 |
|
1527 // clear type flags... |
|
1528 pagePhys &= ~KPageMask; |
|
1529 |
|
1530 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys); |
|
1531 if (pi) |
|
1532 { |
|
1533 SPageInfo::TPagedState pagedState = pi->PagedState(); |
|
1534 if (pagedState != SPageInfo::EUnpaged) |
|
1535 { |
|
1536 // The page is demand paged. Only remap the page if it is pinned or is currently |
|
1537 // accessible but to the old physical page. |
|
1538 if (pagedState != SPageInfo::EPagedPinned && |
|
1539 (Mmu::IsPteInaccessible(pte) || (pte^pagePhys) < TPte(KPageSize))) |
|
1540 return; |
|
1541 if (!pi->IsDirty()) |
|
1542 { |
|
1543 // Ensure that the page is mapped as read only to prevent pages being marked dirty |
|
1544 // by page moving despite not having been written to |
|
1545 Mmu::MakePteInaccessible(aBlankPte, EFalse); |
|
1546 } |
|
1547 } |
|
1548 } |
|
1549 |
|
1550 // Map the page in the page array entry as this is always the physical |
|
1551 // page that the memory object's page should be mapped to. |
|
1552 pte = pagePhys|aBlankPte; |
|
1553 TRACE2(("!PTE %x=%x",aPtePtr,pte)); |
|
1554 *aPtePtr = pte; |
|
1555 |
|
1556 // clean cache... |
|
1557 CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr); |
|
1558 } |
|
1559 |
|
1560 |
|
/**
Modify page table entries (PTEs) so they no longer map any memory pages.

@param aPtePtr Pointer into a page table for the PTE of the first page.
@param aCount  The number of pages to modify.

@return False, if the page table no longer maps any entries and may be freed.
        True otherwise, to indicate that the page table is still needed.

@pre #MmuLock held.
@post #MmuLock held and has not been released by this function.
*/
TBool Mmu::UnmapPages(TPte* const aPtePtr, TUint aCount)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aCount);

	TUint count = 0;
	if(aCount==1)
		{
		if(*aPtePtr==KPteUnallocatedEntry)
			return true; // page already unmapped

		// unmap page...
		++count;
		TPte pte = KPteUnallocatedEntry;
		TRACE2(("!PTE %x=%x",aPtePtr,pte));
		*aPtePtr = pte;

		// clean cache...
		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
		}
	else
		{
		// check we are only updating a single page table...
		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);

		// unmap pages...
		TPte* pPte = aPtePtr;
		TPte* pPteEnd = aPtePtr+aCount;
		do
			{
			if(*pPte!=KPteUnallocatedEntry)
				{
				// unmap page...
				++count;
				TPte pte = KPteUnallocatedEntry;
				TRACE2(("!PTE %x=%x",pPte,pte));
				*pPte = pte;
				}
			}
		while(++pPte<pPteEnd);

		if(!count)
			return true; // no PTEs changed, so nothing more to do

		// clean cache...
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
		}

	// update page table info...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
	count = pti->DecPageCount(count);
	TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,count));
	__NK_ASSERT_DEBUG(pti->CheckPageCount());

	// see if page table needs freeing...
	TUint keepPt = count | pti->PermanenceCount();

	return keepPt;
	}


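/*
Illustrative sketch (not part of the original source): a typical caller frees
the page table once UnmapPages() reports that it no longer maps anything.
'pt', 'n' and FreePageTable() are hypothetical stand-ins for the caller's own
state and cleanup:

	MmuLock::Lock();
	TBool keepPt = TheMmu.UnmapPages(pt, n);
	MmuLock::Unlock();
	if(!keepPt)
		FreePageTable(pt); // hypothetical helper; the table maps no pages
*/
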
/**
Modify page table entries (PTEs) so they no longer map the given memory pages.
Entries are only updated if the current state of the corresponding page
is 'decommitted', i.e. RPageArray::TargetStateIsDecommitted returns true.

@param aPtePtr Pointer into a page table for the PTE of the first page.
@param aCount  The number of pages to modify.
@param aPages  Pointer to the entry for the first page in a memory object's #RPageArray.
               Each entry contains the physical address of a page together with its
               current state (RPageArray::TState).

@return False, if the page table no longer maps any entries and may be freed.
        True otherwise, to indicate that the page table is still needed.

@pre #MmuLock held.
@post #MmuLock held and has not been released by this function.
*/
TBool Mmu::UnmapPages(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aCount);

	TUint count = 0;
	if(aCount==1)
		{
		if(*aPtePtr==KPteUnallocatedEntry)
			return true; // page already unmapped

		if(!RPageArray::TargetStateIsDecommitted(*aPages))
			return true; // page has been reallocated

		// unmap page...
		++count;
		TPte pte = KPteUnallocatedEntry;
		TRACE2(("!PTE %x=%x",aPtePtr,pte));
		*aPtePtr = pte;

		// clean cache...
		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
		}
	else
		{
		// check we are only updating a single page table...
		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);

		// unmap pages...
		TPte* pPte = aPtePtr;
		TPte* pPteEnd = aPtePtr+aCount;
		do
			{
			if(RPageArray::TargetStateIsDecommitted(*aPages++) && *pPte!=KPteUnallocatedEntry)
				{
				// unmap page...
				++count;
				TPte pte = KPteUnallocatedEntry;
				TRACE2(("!PTE %x=%x",pPte,pte));
				*pPte = pte;
				}
			}
		while(++pPte<pPteEnd);

		if(!count)
			return true; // no PTEs changed, so nothing more to do

		// clean cache...
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
		}

	// update page table info...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
	count = pti->DecPageCount(count);
	TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,count));
	__NK_ASSERT_DEBUG(pti->CheckPageCount());

	// see if page table needs freeing...
	TUint keepPt = count | pti->PermanenceCount();

	return keepPt;
	}


/**
Modify page table entries (PTEs) so the given memory pages are not accessible.
Entries are only updated if the current state of the corresponding page is
RPageArray::ERestrictingNA (or, for the single page case, RPageArray::EMoving).

@param aPtePtr Pointer into a page table for the PTE of the first page.
@param aCount  The number of pages to modify.
@param aPages  Pointer to the entry for the first page in a memory object's #RPageArray.
               Each entry contains the physical address of a page together with its
               current state (RPageArray::TState).

@pre #MmuLock held.
@post #MmuLock held and has not been released by this function.
*/
void Mmu::RestrictPagesNA(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aCount);

	if(aCount==1)
		{
		TPhysAddr page = *aPages;
		TPte pte = *aPtePtr;
		RPageArray::TState state = RPageArray::State(page);
		if(state != RPageArray::ERestrictingNA && state != RPageArray::EMoving)
			return; // page no longer needs restricting

		if(pte==KPteUnallocatedEntry)
			return; // page gone

		// restrict page...
		pte = Mmu::MakePteInaccessible(pte,false);
		TRACE2(("!PTE %x=%x",aPtePtr,pte));
		*aPtePtr = pte;

		// clean cache...
		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
		}
	else
		{
		// check we are only updating a single page table...
		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);

		// restrict pages...
		TPte* pPte = aPtePtr;
		TPte* pPteEnd = aPtePtr+aCount;
		do
			{
			TPhysAddr page = *aPages++;
			TPte pte = *pPte++;
			if(RPageArray::State(page)==RPageArray::ERestrictingNA && pte!=KPteUnallocatedEntry)
				{
				pte = Mmu::MakePteInaccessible(pte,false);
				TRACE2(("!PTE %x=%x",pPte-1,pte));
				pPte[-1] = pte;
				}
			}
		while(pPte<pPteEnd);

		// clean cache...
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
		}
	}


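/*
Illustrative note (not part of the original source): like UnmapPages() above,
RestrictPagesNA() re-checks each RPageArray entry before touching its PTE, so
a page whose state changed while the operation was in progress (for example,
one that was recommitted) is left untouched. The pattern, sketched with a
hypothetical index 'i':

	TPhysAddr entry = aPages[i]; // physical address | RPageArray::TState bits
	if(RPageArray::State(entry)==RPageArray::ERestrictingNA && pte!=KPteUnallocatedEntry)
		{
		// still being restricted, so it is safe to make the PTE inaccessible
		}
*/
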
/**
Modify page table entries (PTEs) so they map the given demand paged memory pages.

Entries are only updated if the current state of the corresponding page
is RPageArray::ECommitted.

This function is used for demand paged memory when handling a page fault or
memory pinning operation. It will widen the access permission of existing entries
if required to match \a aBlankPte and will 'rejuvenate' the page table.

@param aPtePtr   Pointer into a page table for the PTE of the first page.
@param aCount    The number of pages to modify.
@param aPages    Pointer to the entry for the first page in a memory object's #RPageArray.
                 Each entry contains the physical address of a page together with its
                 current state (RPageArray::TState).
@param aBlankPte The value to use for each PTE, with the physical address component equal
                 to zero.

@return False, if the page table no longer maps any entries and may be freed.
        True otherwise, to indicate that the page table is still needed.

@pre #MmuLock held.
@post #MmuLock held (but may have been released by this function).
*/
TBool Mmu::PageInPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aCount);
	__NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);

	TUint count = 0;

	if(aCount==1)
		{
		// get page to map...
		TPhysAddr page = *aPages;
		TPte pte = *aPtePtr;
		if(!RPageArray::TargetStateIsCommitted(page))
			goto done; // page no longer needs mapping

#ifdef _DEBUG
		if(pte!=KPteUnallocatedEntry)
			{
			if ((pte^page)>=TPte(KPageSize) && !Mmu::IsPteInaccessible(pte) &&
				!Mmu::IsPteReadOnly(pte))
				{
				// Page has been mapped before but the physical address is different
				// and the page hasn't been moved as it is not inaccessible.
				Kern::Printf("Mmu::PageInPages already mapped %x->%x",page,pte);
				__NK_ASSERT_DEBUG(0);
				}
			}
#endif
		if(!Mmu::IsPteMoreAccessible(aBlankPte,pte))
			return true; // return true to keep page table (it already had at least this page mapped)

		// remap page with new increased permissions...
		if(pte==KPteUnallocatedEntry)
			count = 1; // we'll be adding a new pte entry, count it
		if(!Mmu::IsPteReadOnly(aBlankPte))
			ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
		pte = (page&~KPageMask)|aBlankPte;
		TRACE2(("!PTE %x=%x",aPtePtr,pte));
		*aPtePtr = pte;

		// clean cache...
		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
		}
	else
		{
		// check we are only updating a single page table...
		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);

		// map pages...
		TPte* pPte = aPtePtr;
		TPte* pPteEnd = aPtePtr+aCount;
		do
			{
			// map page...
			TPhysAddr page = *aPages++;
			TPte pte = *pPte++;
			if(RPageArray::TargetStateIsCommitted(page))
				{
#ifdef _DEBUG
				if(pte!=KPteUnallocatedEntry)
					{
					if ((pte^page)>=TPte(KPageSize) && !Mmu::IsPteInaccessible(pte) &&
						!Mmu::IsPteReadOnly(pte))
						{
						// Page has been mapped before but the physical address is different
						// and the page hasn't been moved as it is not inaccessible.
						Kern::Printf("Mmu::PageInPages already mapped %x->%x",page,pte);
						__NK_ASSERT_DEBUG(0);
						}
					}
#endif
				if(Mmu::IsPteMoreAccessible(aBlankPte,pte))
					{
					// remap page with new increased permissions...
					if(pte==KPteUnallocatedEntry)
						++count; // we'll be adding a new pte entry, count it
					if(!Mmu::IsPteReadOnly(aBlankPte))
						ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
					pte = (page&~KPageMask)|aBlankPte;
					TRACE2(("!PTE %x=%x",pPte-1,pte));
					pPte[-1] = pte;
					}
				}
			}
		while(pPte!=pPteEnd);

		// clean cache...
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
		}

done:
	// update page counts...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
	count = pti->IncPageCount(count);
	TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,pti->PageCount()));
	__NK_ASSERT_DEBUG(pti->CheckPageCount());

	// see if page table needs freeing...
	TUint keepPt = count | pti->PermanenceCount();

	// rejuvenate demand paged page tables...
	ThePager.RejuvenatePageTable(aPtePtr);

	return keepPt;
	}


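/*
Illustrative note (not part of the original source): PageInPages() only ever
widens access. A PTE that already grants at least the permissions in aBlankPte
is left alone and the page table kept; otherwise the PTE is rebuilt from
aBlankPte, and when the new mapping is writable the pager is told, so the page
will be cleaned before it is paged out again:

	if(Mmu::IsPteMoreAccessible(aBlankPte,pte))
		{
		if(!Mmu::IsPteReadOnly(aBlankPte))
			ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
		pte = (page&~KPageMask)|aBlankPte;
		}
*/
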
//
// CodeModifier
//

#ifdef __DEBUGGER_SUPPORT__

void DoWriteCode(TUint32* aAddress, TUint32 aValue);

#ifdef __SMP__

extern "C" void __e32_instruction_barrier();

class TCodeModifierBroadcast : public TGenericIPI
	{
public:
	TCodeModifierBroadcast(TUint32* aAddress, TUint32 aValue);
	static void Isr(TGenericIPI*);
	void Go();
public:
	TUint32* iAddress;
	TUint32 iValue;
	volatile TInt iFlag;
	};

TCodeModifierBroadcast::TCodeModifierBroadcast(TUint32* aAddress, TUint32 aValue)
	: iAddress(aAddress), iValue(aValue), iFlag(0)
	{
	}

void TCodeModifierBroadcast::Isr(TGenericIPI* aPtr)
	{
	TCodeModifierBroadcast& a = *(TCodeModifierBroadcast*)aPtr;
	while (!__e32_atomic_load_acq32(&a.iFlag))
		__chill();
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	// need to do separate Clean-D, Purge-I on each core
	CacheMaintenance::CodeChanged((TLinAddr)a.iAddress, sizeof (TInt), CacheMaintenance::ECodeModifier);
#else
	__e32_instruction_barrier(); // synchronize instruction execution
#endif
	}

void TCodeModifierBroadcast::Go()
	{
	NKern::Lock();
	QueueAllOther(&Isr);
	WaitEntry(); // wait for other cores to stop
	DoWriteCode(iAddress, iValue);
	iFlag = 1;
	__e32_instruction_barrier(); // synchronize instruction execution
	WaitCompletion(); // wait for other cores to resume
	NKern::Unlock();
	}
#endif

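/*
Illustrative note (not part of the original source): the broadcast is a
two-phase handshake. Go() traps every other core in Isr(), where each spins on
iFlag; the initiating core then writes the instruction word, publishes iFlag=1
to release the spinners, and every core performs its own cache maintenance (or
an instruction barrier) before resuming, so no core can execute a stale or
half-written instruction. It is used exactly as in SafeWriteCode() below:

	TCodeModifierBroadcast b(ptr, newWord);
	b.Go();
*/
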
/**
@pre Calling thread must be in critical section
@pre CodeSeg mutex held
*/
TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
	{
	__ASSERT_CRITICAL;
	Mmu& m=TheMmu;
	RamAllocLock::Lock();
	MmuLock::Lock();
	__UNLOCK_GUARD_START(MmuLock);

	// Check aProcess is still alive by opening a reference on its os asid.
	TInt osAsid = ((DMemModelProcess*)aProcess)->TryOpenOsAsid();
	if (osAsid < 0)
		{
		__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - zombie process"));
		__UNLOCK_GUARD_END(MmuLock);
		MmuLock::Unlock();
		RamAllocLock::Unlock();
		return KErrBadDescriptor;
		}

	// Find the physical address of the page the breakpoint belongs to.
	TPhysAddr physAddr = Mmu::LinearToPhysical(aAddress, osAsid);
	__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - PA:%x", physAddr));

	if (physAddr==KPhysAddrInvalid)
		{
		__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - invalid VA"));
		__UNLOCK_GUARD_END(MmuLock);
		MmuLock::Unlock();
		RamAllocLock::Unlock();
		// The os asid is no longer required.
		((DMemModelProcess*)aProcess)->CloseOsAsid();
		return KErrBadDescriptor;
		}

	// Temporarily map the physical page.
	TLinAddr tempAddr = m.MapTemp(physAddr&~KPageMask, aAddress>>KPageShift);
	tempAddr |= aAddress & KPageMask;
	__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - tempAddr:%x",tempAddr));

	TInt r = KErrBadDescriptor;
	TUint32* ptr = (TUint32*)(tempAddr&~3);
	TUint32 oldWord;

	if(Kern::SafeRead(ptr,&oldWord,sizeof(oldWord))==0       // safely read the original value...
		&& Kern::SafeWrite(ptr,&oldWord,sizeof(oldWord))==0) // and write it back
		{
		// We have successfully probed the memory by reading and writing to it
		// so we assume it is now safe to access without generating exceptions.
		// If this is wrong it will kill the system horribly.

		TUint32 newWord;
		TUint badAlign;
		TUint shift = (aAddress&3)*8;

		switch(aSize)
			{
			case 1: // 1 byte value
				badAlign = 0;
				*(TUint8*)aOldValue = oldWord>>shift;
				newWord = (oldWord&~(0xff<<shift)) | ((aValue&0xff)<<shift);
				break;

			case 2: // 2 byte value
				badAlign = tempAddr&1;
				if(!badAlign)
					*(TUint16*)aOldValue = oldWord>>shift;
				newWord = (oldWord&~(0xffff<<shift)) | ((aValue&0xffff)<<shift);
				break;

			default: // 4 byte value
				badAlign = tempAddr&3;
				if(!badAlign)
					*(TUint32*)aOldValue = oldWord;
				newWord = aValue;
				break;
			}

		if(!badAlign)
			{
			// write the new value...
#ifdef __SMP__
			TCodeModifierBroadcast b(ptr, newWord);
			b.Go();
#else
			DoWriteCode(ptr, newWord);
#endif
			r = KErrNone;
			}
		}

	__UNLOCK_GUARD_END(MmuLock);
	m.UnmapTemp();
	MmuLock::Unlock();
	RamAllocLock::Unlock();
	// The os asid is no longer required.
	((DMemModelProcess*)aProcess)->CloseOsAsid();
	return r;
	}

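/*
Illustrative worked example (not part of the original source): writing the
1-byte value 0xCC at an address with (aAddress&3)==2, where the aligned word
currently holds 0x11223344 (little-endian):

	shift = 2*8 = 16
	old byte reported back = TUint8(0x11223344>>16) = 0x22
	newWord = (0x11223344 & ~(0xff<<16)) | (0xCC<<16) = 0x11CC3344
*/
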
/**
@pre Calling thread must be in critical section
@pre CodeSeg mutex held
*/
void DoWriteCode(TUint32* aAddress, TUint32 aValue)
	{
	// We do not want to be interrupted by e.g. an ISR that would run the altered code
	// before the IMB sequence completes. Therefore, copy the data and clean/invalidate
	// the caches with interrupts disabled.
	TInt irq = NKern::DisableAllInterrupts();
	*aAddress = aValue;
	CacheMaintenance::CodeChanged((TLinAddr)aAddress, sizeof(TUint32), CacheMaintenance::ECodeModifier);
	NKern::RestoreInterrupts(irq);
	}

#endif //__DEBUGGER_SUPPORT__


//
// Virtual pinning
//

TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
	{
	aPinObject = (TVirtualPinObject*)new DVirtualPinMapping;
	return aPinObject != NULL ? KErrNone : KErrNoMemory;
	}

TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr aStart, TUint aSize, DThread* aThread)
	{
	NKern::ThreadEnterCS();
	TUint offsetInMapping;
	TUint mapInstanceCount;
	DMemoryMapping* mapping = MM::FindMappingInThread((DMemModelThread*)aThread,
													  aStart,
													  aSize,
													  offsetInMapping,
													  mapInstanceCount);
	TInt r = KErrBadDescriptor;
	if (mapping)
		{
		TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
		if(mapping->IsPinned())
			{
			// The mapping for the specified virtual address is already pinned, so there
			// is nothing to do. We couldn't safely pin the memory in this case anyway,
			// as pinned mappings may move between memory objects.
			r = KErrNone;
			}
		else
			{
			MmuLock::Lock();
			DMemoryObject* memory = mapping->Memory();
			if (mapInstanceCount != mapping->MapInstanceCount() ||
				!memory || !memory->IsDemandPaged())
				{
				// mapping has been reused, no memory, or it's not paged, so no need to pin...
				MmuLock::Unlock();
				r = KErrNone;
				}
			else
				{
				// paged memory needs pinning...
				// Open a reference on the memory so it doesn't get deleted.
				memory->Open();
				MmuLock::Unlock();

				TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
				r = ((DVirtualPinMapping*)aPinObject)->Pin(memory, startInMemory, count, mapping->Permissions(),
														   mapping, mapInstanceCount);
				memory->Close();
				}
			}
		mapping->Close();
		}
	NKern::ThreadLeaveCS();

	return r;
	}

TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr aStart, TUint aSize)
	{
	aPinObject = 0;
	NKern::ThreadEnterCS();
	TUint offsetInMapping;
	TUint mapInstanceCount;
	DMemoryMapping* mapping = MM::FindMappingInThread((DMemModelThread*)&Kern::CurrentThread(),
													  aStart,
													  aSize,
													  offsetInMapping,
													  mapInstanceCount);
	TInt r = KErrBadDescriptor;
	if (mapping)
		{
		TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
		if(mapping->IsPinned())
			{
			// The mapping for the specified virtual address is already pinned, so there
			// is nothing to do. We couldn't safely pin the memory in this case anyway,
			// as pinned mappings may move between memory objects.
			r = KErrNone;
			}
		else
			{
			MmuLock::Lock();
			DMemoryObject* memory = mapping->Memory();
			if (mapInstanceCount != mapping->MapInstanceCount() ||
				!memory || !memory->IsDemandPaged())
				{
				// mapping has been reused, no memory, or it's not paged, so no need to pin...
				MmuLock::Unlock();
				r = KErrNone;
				}
			else
				{
				// The memory is demand paged so create a pin object and pin it.
				// Open a reference on the memory so it doesn't get deleted.
				memory->Open();
				MmuLock::Unlock();
				r = CreateVirtualPinObject(aPinObject);
				if (r == KErrNone)
					{
					TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
					r = ((DVirtualPinMapping*)aPinObject)->Pin(memory, startInMemory, count, mapping->Permissions(),
															   mapping, mapInstanceCount);
					if (r != KErrNone)
						{
						// Failed to pin the memory, so the pin object is not required.
						DestroyVirtualPinObject(aPinObject);
						}
					}
				memory->Close();
				}
			}
		mapping->Close();
		}
	NKern::ThreadLeaveCS();

	return r;
	}

void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
	{
	DVirtualPinMapping* mapping = (DVirtualPinMapping*)aPinObject;
	if (mapping->IsAttached())
		mapping->Unpin();
	}

void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
	{
	DVirtualPinMapping* mapping = (DVirtualPinMapping*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
	if (mapping)
		{
		if (mapping->IsAttached())
			mapping->Unpin();
		mapping->AsyncClose();
		}
	}

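/*
Illustrative sketch (not part of the original source): the virtual pinning API
follows a create/pin/use/unpin/destroy lifecycle (kernel-side clients normally
reach it through the published Kern:: pin APIs rather than M:: directly).
'addr', 'size' and 'thread' are hypothetical:

	TVirtualPinObject* pin;
	TInt r = M::CreateVirtualPinObject(pin);
	if(r==KErrNone)
		{
		r = M::PinVirtualMemory(pin, addr, size, thread);
		if(r==KErrNone)
			{
			// ... access the memory without risking paging faults ...
			M::UnpinVirtualMemory(pin);
			}
		M::DestroyVirtualPinObject(pin);
		}
*/
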
//
// Physical pinning
//

TInt M::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject)
	{
	aPinObject = (TPhysicalPinObject*)new DPhysicalPinMapping;
	return aPinObject != NULL ? KErrNone : KErrNoMemory;
	}

TInt M::PinPhysicalMemory(TPhysicalPinObject* aPinObject, TLinAddr aStart, TUint aSize, TBool aReadOnly,
						  TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour, DThread* aThread)
	{
	NKern::ThreadEnterCS();
	TUint offsetInMapping;
	TUint mapInstanceCount;
	DMemoryMapping* mapping = MM::FindMappingInThread((DMemModelThread*)aThread,
													  aStart,
													  aSize,
													  offsetInMapping,
													  mapInstanceCount);
	TInt r = KErrBadDescriptor;
	if (mapping)
		{
		TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;

		MmuLock::Lock();
		DMemoryObject* memory = mapping->Memory();
		if (mapInstanceCount == mapping->MapInstanceCount() && memory)
			{
			memory->Open();
			MmuLock::Unlock();

			TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
			TMappingPermissions permissions = aReadOnly ? ESupervisorReadOnly : ESupervisorReadWrite;
			r = ((DPhysicalPinMapping*)aPinObject)->Pin(memory, startInMemory, count, permissions);
			if (r == KErrNone)
				{
				r = ((DPhysicalPinMapping*)aPinObject)->PhysAddr(0, count, aAddress, aPages);
				if (r>=KErrNone)
					{
					r = KErrNone; // do not report discontiguous memory in the return value
					const TMappingAttributes2& mapAttr2 =
						MM::LegacyMappingAttributes(memory->Attributes(), mapping->Permissions());
					*(TMappingAttributes2*)&aMapAttr = mapAttr2;
					}
				else
					UnpinPhysicalMemory(aPinObject);
				}
			memory->Close();
			}
		else // mapping has been reused or no memory...
			{
			MmuLock::Unlock();
			}
		mapping->Close();
		}
	NKern::ThreadLeaveCS();
	aColour = (aStart >> KPageShift) & KPageColourMask;
	return r;
	}

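/*
Illustrative sketch (not part of the original source): a driver pins a client
buffer to learn its physical pages before programming a DMA transfer. 'addr',
'size' and 'thread' are hypothetical, and 'pages' must have room for one entry
per page spanned:

	TPhysicalPinObject* pin;
	TPhysAddr physAddr;
	TPhysAddr pages[8];
	TUint32 mapAttr;
	TUint colour;
	TInt r = M::CreatePhysicalPinObject(pin);
	if(r==KErrNone)
		{
		r = M::PinPhysicalMemory(pin, addr, size, EFalse, physAddr, pages, mapAttr, colour, thread);
		// ... program DMA from 'pages'; physAddr is only useful if the region is contiguous ...
		M::UnpinPhysicalMemory(pin);
		M::DestroyPhysicalPinObject(pin);
		}
*/
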
void M::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject)
	{
	DPhysicalPinMapping* mapping = (DPhysicalPinMapping*)aPinObject;
	if (mapping->IsAttached())
		mapping->Unpin();
	}

void M::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject)
	{
	DPhysicalPinMapping* mapping = (DPhysicalPinMapping*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
	if (mapping)
		{
		if (mapping->IsAttached())
			mapping->Unpin();
		mapping->AsyncClose();
		}
	}


//
// Cache sync operations
//

//@pre As for MASK_THREAD_STANDARD
void Mmu::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
	{
	// Jump over the pages we do not have to sync.
	// (The colour must be advanced before aOffset is masked.)
	aPages += aOffset>>KPageShift;
	aColour = (aColour + (aOffset>>KPageShift)) & KPageColourMask;
	aOffset &= KPageMask;

	// Calculate the page table entry for the temporary mapping.
	TUint pteType = PteType(ESupervisorReadWrite,true);
	TMappingAttributes2 mapAttr2(aMapAttr);
	TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);

	while (aSize) // a single pass of the loop operates within page boundaries
		{
		TUint sizeInLoopPass = Min(KPageSize, aOffset+aSize) - aOffset; // the size of the region in this pass

		NKern::ThreadEnterCS();
		Kern::MutexWait(*iPhysMemSyncMutex);

		TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
		CacheMaintenance::MakeCPUChangesVisible(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
		iPhysMemSyncTemp.Unmap();

		Kern::MutexSignal(*iPhysMemSyncMutex);
		NKern::ThreadLeaveCS();

		aSize -= sizeInLoopPass; // remaining bytes to sync
		aOffset = 0;             // every page after the first syncs from a zero offset
		aPages++;                // point to the next page
		aColour = (aColour+1) & KPageColourMask;
		}
	}

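/*
Illustrative worked example (not part of the original source): with 4K pages,
a sync of aSize=0x2000 bytes starting at aOffset=0x800 takes three passes:

	pass 1: sizeInLoopPass = Min(0x1000, 0x800+0x2000) - 0x800 = 0x1000 - 0x800 = 0x800
	pass 2: sizeInLoopPass = Min(0x1000, 0x0+0x1800) - 0x0 = 0x1000
	pass 3: sizeInLoopPass = Min(0x1000, 0x0+0x800) - 0x0 = 0x800
*/
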
//@pre As for MASK_THREAD_STANDARD
void Mmu::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
	{
	// Jump over the pages we do not have to sync.
	// (The colour must be advanced before aOffset is masked.)
	aPages += aOffset>>KPageShift;
	aColour = (aColour + (aOffset>>KPageShift)) & KPageColourMask;
	aOffset &= KPageMask;

	// Calculate the page table entry for the temporary mapping.
	TUint pteType = PteType(ESupervisorReadWrite,true);
	TMappingAttributes2 mapAttr2(aMapAttr);
	TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);

	while (aSize) // a single pass of the loop operates within page boundaries
		{
		TUint sizeInLoopPass = Min(KPageSize, aOffset+aSize) - aOffset; // the size of the region in this pass

		NKern::ThreadEnterCS();
		Kern::MutexWait(*iPhysMemSyncMutex);

		TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
		CacheMaintenance::PrepareMemoryForExternalWrites(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
		iPhysMemSyncTemp.Unmap();

		Kern::MutexSignal(*iPhysMemSyncMutex);
		NKern::ThreadLeaveCS();

		aSize -= sizeInLoopPass; // remaining bytes to sync
		aOffset = 0;             // every page after the first syncs from a zero offset
		aPages++;                // point to the next page
		aColour = (aColour+1) & KPageColourMask;
		}
	}

//@pre As for MASK_THREAD_STANDARD
void Mmu::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
	{
	// Jump over the pages we do not have to sync.
	// (The colour must be advanced before aOffset is masked.)
	aPages += aOffset>>KPageShift;
	aColour = (aColour + (aOffset>>KPageShift)) & KPageColourMask;
	aOffset &= KPageMask;

	// Calculate the page table entry for the temporary mapping.
	TUint pteType = PteType(ESupervisorReadWrite,true);
	TMappingAttributes2 mapAttr2(aMapAttr);
	TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);

	while (aSize) // a single pass of the loop operates within page boundaries
		{
		TUint sizeInLoopPass = Min(KPageSize, aOffset+aSize) - aOffset; // the size of the region in this pass

		NKern::ThreadEnterCS();
		Kern::MutexWait(*iPhysMemSyncMutex);

		TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
		CacheMaintenance::MakeExternalChangesVisible(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
		iPhysMemSyncTemp.Unmap();

		Kern::MutexSignal(*iPhysMemSyncMutex);
		NKern::ThreadLeaveCS();

		aSize -= sizeInLoopPass; // remaining bytes to sync
		aOffset = 0;             // every page after the first syncs from a zero offset
		aPages++;                // point to the next page
		aColour = (aColour+1) & KPageColourMask;
		}
	}

EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaWrite");
	TheMmu.SyncPhysicalMemoryBeforeDmaWrite(aPages, aColour, aOffset, aSize, aMapAttr);
	return KErrNone;
	}

EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaRead");
	TheMmu.SyncPhysicalMemoryBeforeDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
	return KErrNone;
	}

EXPORT_C TInt Cache::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryAfterDmaRead");
	TheMmu.SyncPhysicalMemoryAfterDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
	return KErrNone;
	}
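
/*
Illustrative sketch (not part of the original source): a driver brackets a DMA
transfer with these exported calls, using the page list, colour and attributes
obtained from physical pinning above; 'len' is hypothetical:

	// device will read the buffer (memory -> peripheral):
	Cache::SyncPhysicalMemoryBeforeDmaWrite(pages, colour, 0, len, mapAttr);

	// device will write the buffer (peripheral -> memory):
	Cache::SyncPhysicalMemoryBeforeDmaRead(pages, colour, 0, len, mapAttr);
	// ... DMA completes ...
	Cache::SyncPhysicalMemoryAfterDmaRead(pages, colour, 0, len, mapAttr);
*/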