author | Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com> |
Mon, 18 Jan 2010 21:31:10 +0200 | |
changeset 36 | 538db54a451d |
parent 31 | 56f325a607ea |
child 39 | 5d2844f35677 |
permissions | -rw-r--r-- |
0 | 1 |
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies). |
2 |
// All rights reserved. |
|
3 |
// This component and the accompanying materials are made available |
|
4 |
// under the terms of the License "Eclipse Public License v1.0" |
|
5 |
// which accompanies this distribution, and is available |
|
6 |
// at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 |
// |
|
8 |
// Initial Contributors: |
|
9 |
// Nokia Corporation - initial contribution. |
|
10 |
// |
|
11 |
// Contributors: |
|
12 |
// |
|
13 |
// Description: |
|
14 |
// |
|
15 |
||
16 |
#include "memmodel.h" |
|
17 |
#include "kernel/cache_maintenance.inl" |
|
18 |
#include <kernel/cache.h> |
|
19 |
#include <ramalloc.h> |
|
20 |
#include <defrag.h> |
|
21 |
#include "mm.h" |
|
22 |
#include "mmu.h" |
|
23 |
#include "mpager.h" |
|
24 |
#include "mmapping.h" |
|
25 |
#include "mobject.h" |
|
26 |
#include "mmanager.h" |
|
27 |
#include "mpagearray.h" |
|
28 |
||
29 |
||
30 |
// |
|
31 |
// SPageInfo |
|
32 |
// |
|
33 |
||
34 |
// check enough space for page infos... |
|
35 |
__ASSERT_COMPILE((KPageInfoLinearEnd-KPageInfoLinearBase)/sizeof(SPageInfo)==(1<<(32-KPageShift))); |
|
36 |
||
37 |
// check KPageInfoShift... |
|
38 |
__ASSERT_COMPILE(sizeof(SPageInfo)==(1<<KPageInfoShift)); |
|
39 |
||
40 |
||
41 |
SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress) |
|
42 |
{ |
|
43 |
__NK_ASSERT_DEBUG((aAddress&KPageMask)==0); |
|
44 |
TUint index = aAddress>>(KPageShift+KPageShift-KPageInfoShift); |
|
45 |
TUint flags = ((TUint8*)KPageInfoMap)[index>>3]; |
|
46 |
TUint mask = 1<<(index&7); |
|
47 |
if(!(flags&mask)) |
|
48 |
return 0; // no SPageInfo for aAddress |
|
49 |
SPageInfo* info = FromPhysAddr(aAddress); |
|
50 |
if(info->iType==SPageInfo::EInvalid) |
|
51 |
return 0; |
|
52 |
return info; |
|
53 |
} |
|
54 |
||
55 |
||
56 |
#ifdef _DEBUG |
|
57 |
||
58 |
void SPageInfo::CheckAccess(const char* aMessage, TUint aFlags) |
|
59 |
{ |
|
60 |
if(K::Initialising || NKern::Crashed()) |
|
61 |
return; |
|
62 |
||
63 |
if((aFlags&ECheckNotAllocated) && (iType!=EUnknown)) |
|
64 |
{ |
|
65 |
Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage); |
|
66 |
__NK_ASSERT_DEBUG(0); |
|
67 |
goto fail; |
|
68 |
} |
|
69 |
||
70 |
if((aFlags&ECheckNotUnused) && (iType==EUnused)) |
|
71 |
{ |
|
72 |
Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage); |
|
73 |
__NK_ASSERT_DEBUG(0); |
|
74 |
goto fail; |
|
75 |
} |
|
76 |
||
77 |
if((aFlags&ECheckUnused) && (iType!=EUnused)) |
|
78 |
{ |
|
79 |
Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage); |
|
80 |
__NK_ASSERT_DEBUG(0); |
|
81 |
goto fail; |
|
82 |
} |
|
83 |
||
84 |
if((aFlags&ECheckNotPaged) && (iPagedState!=EUnpaged)) |
|
85 |
{ |
|
86 |
Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iPagedState=%d : %s",this,PhysAddr(),iPagedState,aMessage); |
|
87 |
__NK_ASSERT_DEBUG(0); |
|
88 |
goto fail; |
|
89 |
} |
|
90 |
||
91 |
if((aFlags&ECheckRamAllocLock) && !RamAllocLock::IsHeld()) |
|
92 |
{ |
|
93 |
Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage); |
|
94 |
__NK_ASSERT_DEBUG(0); |
|
95 |
goto fail; |
|
96 |
} |
|
97 |
||
98 |
if((aFlags&ENoCheckMmuLock) || MmuLock::IsHeld()) |
|
99 |
return; |
|
100 |
fail: |
|
101 |
Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x : %s",this,PhysAddr(),aMessage); |
|
102 |
Mmu::Panic(Mmu::EUnsafePageInfoAccess); |
|
103 |
} |
|
104 |
||
105 |
||
106 |
void SPageInfo::Dump() |
|
107 |
{ |
|
108 |
Kern::Printf("SPageInfo for page %x = %d,%d,%02x,0x%08x,0x%x,%d",PhysAddr(),iType,iPagedState,iFlags,iOwner,iIndex,iPinCount); |
|
109 |
} |
|
110 |
||
111 |
#endif |
|
112 |
||
113 |
||
114 |
||
115 |
// |
|
116 |
// SPageTableInfo |
|
117 |
// |
|
118 |
||
119 |
// check enough space for page table infos... |
|
120 |
__ASSERT_COMPILE((KPageTableInfoEnd-KPageTableInfoBase)/sizeof(SPageTableInfo) |
|
121 |
>=(KPageTableEnd-KPageTableBase)/KPageTableSize); |
|
122 |
||
123 |
// check KPtBlockShift... |
|
124 |
__ASSERT_COMPILE((sizeof(SPageTableInfo)<<KPtBlockShift)==KPageSize); |
|
125 |
||
126 |
||
127 |
#ifdef _DEBUG |
|
128 |
||
129 |
TBool SPageTableInfo::CheckPageCount() |
|
130 |
{ |
|
131 |
__NK_ASSERT_DEBUG(MmuLock::IsHeld()); |
|
132 |
TPte* pt = PageTable(); |
|
133 |
TUint realCount = 0; |
|
134 |
do if(*pt++) ++realCount; |
|
135 |
while(TLinAddr(pt)&(KPageTableMask/sizeof(TPte)*sizeof(TPte))); |
|
136 |
if(iPageCount==realCount) |
|
137 |
return true; |
|
138 |
Kern::Printf("CheckPageCount Failed: pt=0x%08x count=%d realCount=%d",TLinAddr(pt)-KPageTableSize,iPageCount,realCount); |
|
139 |
return false; |
|
140 |
} |
|
141 |
||
142 |
||
143 |
void SPageTableInfo::CheckChangeUse(const char* aName) |
|
144 |
{ |
|
145 |
if(K::Initialising) |
|
146 |
return; |
|
147 |
if(PageTablesLockIsHeld() && MmuLock::IsHeld()) |
|
148 |
return; |
|
149 |
Kern::Printf("SPageTableInfo::CheckChangeUse failed : %s",aName); |
|
150 |
Mmu::Panic(Mmu::EUnsafePageTableInfoAccess); |
|
151 |
} |
|
152 |
||
153 |
||
154 |
void SPageTableInfo::CheckCheckUse(const char* aName) |
|
155 |
{ |
|
156 |
if(K::Initialising) |
|
157 |
return; |
|
158 |
if(PageTablesLockIsHeld() || MmuLock::IsHeld()) |
|
159 |
return; |
|
160 |
Kern::Printf("SPageTableInfo::CheckCheckUse failed : %s",aName); |
|
161 |
Mmu::Panic(Mmu::EUnsafePageTableInfoAccess); |
|
162 |
} |
|
163 |
||
164 |
||
165 |
void SPageTableInfo::CheckAccess(const char* aName) |
|
166 |
{ |
|
167 |
if(K::Initialising) |
|
168 |
return; |
|
169 |
if(MmuLock::IsHeld()) |
|
170 |
return; |
|
171 |
Kern::Printf("SPageTableInfo::CheckAccess failed : %s",aName); |
|
172 |
Mmu::Panic(Mmu::EUnsafePageTableInfoAccess); |
|
173 |
} |
|
174 |
||
175 |
||
176 |
void SPageTableInfo::CheckInit(const char* aName) |
|
177 |
{ |
|
178 |
if(K::Initialising) |
|
179 |
return; |
|
180 |
if(PageTablesLockIsHeld() && iType==EUnused) |
|
181 |
return; |
|
182 |
Kern::Printf("SPageTableInfo::CheckInit failed : %s",aName); |
|
183 |
Mmu::Panic(Mmu::EUnsafePageTableInfoAccess); |
|
184 |
} |
|
185 |
||
186 |
#endif |
|
187 |
||
188 |
||
189 |
||
190 |
// |
|
191 |
// RamAllocLock |
|
192 |
// |
|
193 |
||
194 |
_LIT(KLitRamAlloc,"RamAlloc"); |
|
195 |
_LIT(KLitPhysMemSync,"PhysMemSync"); |
|
196 |
||
197 |
void RamAllocLock::Lock() |
|
198 |
{ |
|
199 |
Mmu& m = TheMmu; |
|
200 |
Kern::MutexWait(*m.iRamAllocatorMutex); |
|
201 |
if(!m.iRamAllocLockCount++) |
|
202 |
{ |
|
203 |
// first lock, so setup memory fail data... |
|
204 |
m.iRamAllocFailed = EFalse; |
|
205 |
__NK_ASSERT_DEBUG(m.iRamAllocInitialFreePages==m.FreeRamInPages()); // free RAM shouldn't have changed whilst lock was held |
|
206 |
} |
|
207 |
} |
|
208 |
||
209 |
||
210 |
void RamAllocLock::Unlock() |
|
211 |
{ |
|
212 |
Mmu& m = TheMmu; |
|
213 |
if(--m.iRamAllocLockCount) |
|
214 |
{ |
|
215 |
Kern::MutexSignal(*m.iRamAllocatorMutex); |
|
216 |
return; |
|
217 |
} |
|
218 |
TBool failed = m.iRamAllocFailed; |
|
219 |
TUint initial = m.iRamAllocInitialFreePages; |
|
220 |
TUint final = m.FreeRamInPages(); |
|
221 |
m.iRamAllocInitialFreePages = final; // new baseline value |
|
222 |
TUint changes = K::CheckFreeMemoryLevel(initial*KPageSize,final*KPageSize,failed); |
|
223 |
if(changes) |
|
224 |
{ |
|
225 |
__KTRACE_OPT(KMMU,Kern::Printf("RamAllocLock::Unlock() changes=%x",changes)); |
|
226 |
} |
|
227 |
Kern::MutexSignal(*m.iRamAllocatorMutex); |
|
228 |
} |
|
229 |
||
230 |
||
231 |
TBool RamAllocLock::Flash() |
|
232 |
{ |
|
233 |
Unlock(); |
|
234 |
Lock(); |
|
235 |
return true; // lock was released |
|
236 |
} |
|
237 |
||
238 |
||
239 |
TBool RamAllocLock::IsHeld() |
|
240 |
{ |
|
241 |
Mmu& m = TheMmu; |
|
242 |
return m.iRamAllocatorMutex->iCleanup.iThread == &Kern::CurrentThread() && m.iRamAllocLockCount; |
|
243 |
} |
|
244 |
||
245 |
||
246 |
||
247 |
// |
|
248 |
// MmuLock |
|
249 |
// |
|
250 |
||
251 |
#ifdef _DEBUG |
|
252 |
TUint MmuLock::UnlockGuardNest =0; |
|
253 |
TUint MmuLock::UnlockGuardFail =0; |
|
254 |
#endif |
|
255 |
||
256 |
NFastMutex MmuLock::iLock; |
|
257 |
||
258 |
void MmuLock::Lock() |
|
259 |
{ |
|
260 |
NKern::FMWait(&iLock); |
|
261 |
} |
|
262 |
||
263 |
void MmuLock::Unlock() |
|
264 |
{ |
|
265 |
UnlockGuardCheck(); |
|
266 |
NKern::FMSignal(&iLock); |
|
267 |
} |
|
268 |
||
269 |
TBool MmuLock::Flash() |
|
270 |
{ |
|
271 |
UnlockGuardCheck(); |
|
272 |
return NKern::FMFlash(&iLock); |
|
273 |
} |
|
274 |
||
275 |
TBool MmuLock::IsHeld() |
|
276 |
{ |
|
277 |
NFastMutex& m = iLock; |
|
278 |
return m.HeldByCurrentThread(); |
|
279 |
} |
|
280 |
||
281 |
||
282 |
||
283 |
// |
|
284 |
// Initialisation |
|
285 |
// |
|
286 |
||
287 |
Mmu TheMmu; |
|
288 |
||
289 |
void Mmu::Init1Common() |
|
290 |
{ |
|
291 |
__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init1Common")); |
|
292 |
||
293 |
// Mmu data |
|
294 |
TUint pteType = PteType(ESupervisorReadWrite,true); |
|
295 |
iTempPteCached = BlankPte((TMemoryAttributes)(EMemoryAttributeNormalCached|EMemoryAttributeDefaultShareable),pteType); |
|
296 |
iTempPteUncached = BlankPte((TMemoryAttributes)(EMemoryAttributeNormalUncached|EMemoryAttributeDefaultShareable),pteType); |
|
297 |
iTempPteCacheMaintenance = BlankPte((TMemoryAttributes)(CacheMaintenance::TemporaryMapping()|EMemoryAttributeDefaultShareable),pteType); |
|
298 |
||
299 |
// other |
|
300 |
PP::MaxUserThreadStack=0x14000; // 80K - STDLIB asks for 64K for PosixServer!!!! |
|
301 |
PP::UserThreadStackGuard=0x2000; // 8K |
|
302 |
PP::MaxStackSpacePerProcess=0x200000; // 2Mb |
|
303 |
K::SupervisorThreadStackSize=0x1000; // 4K |
|
304 |
PP::SupervisorThreadStackGuard=0x1000; // 4K |
|
305 |
K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr; |
|
306 |
PP::RamDriveStartAddress=0; |
|
307 |
PP::RamDriveRange=0; |
|
308 |
PP::RamDriveMaxSize=0x20000000; // 512MB, probably will be reduced later |
|
309 |
K::MemModelAttributes=EMemModelTypeFlexible|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt| |
|
310 |
EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt| |
|
311 |
EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt; |
|
312 |
} |
|
313 |
||
314 |
||
315 |
#if 0 |
|
316 |
void Mmu::VerifyRam() |
|
317 |
{ |
|
318 |
Kern::Printf("Mmu::VerifyRam() pass 1"); |
|
319 |
RamAllocLock::Lock(); |
|
320 |
||
321 |
TPhysAddr p = 0; |
|
322 |
do |
|
323 |
{ |
|
324 |
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(p); |
|
325 |
if(pi) |
|
326 |
{ |
|
327 |
Kern::Printf("%08x %d",p,pi->Type()); |
|
328 |
if(pi->Type()==SPageInfo::EUnused) |
|
329 |
{ |
|
330 |
volatile TPhysAddr* b = (volatile TPhysAddr*)MapTemp(p,0); |
|
331 |
b[0] = p; |
|
332 |
b[1] = ~p; |
|
333 |
__NK_ASSERT_DEBUG(b[0]==p); |
|
334 |
__NK_ASSERT_DEBUG(b[1]==~p); |
|
335 |
UnmapTemp(); |
|
336 |
} |
|
337 |
} |
|
338 |
p += KPageSize; |
|
339 |
} |
|
340 |
while(p); |
|
341 |
||
342 |
TBool fail = false; |
|
343 |
Kern::Printf("Mmu::VerifyRam() pass 2"); |
|
344 |
do |
|
345 |
{ |
|
346 |
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(p); |
|
347 |
if(pi) |
|
348 |
{ |
|
349 |
if(pi->Type()==SPageInfo::EUnused) |
|
350 |
{ |
|
351 |
volatile TPhysAddr* b = (volatile TPhysAddr*)MapTemp(p,0); |
|
352 |
if(b[0]!=p || b[1]!=~p) |
|
353 |
{ |
|
354 |
fail = true; |
|
355 |
Kern::Printf("%08x FAILED %x %x",b[0],b[1]); |
|
356 |
} |
|
357 |
UnmapTemp(); |
|
358 |
} |
|
359 |
} |
|
360 |
p += KPageSize; |
|
361 |
} |
|
362 |
while(p); |
|
363 |
||
364 |
__NK_ASSERT_DEBUG(!fail); |
|
365 |
RamAllocLock::Unlock(); |
|
366 |
} |
|
367 |
#endif |
|
368 |
||
369 |
||
370 |
void Mmu::Init2Common() |
|
371 |
{ |
|
372 |
__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2Common")); |
|
373 |
||
374 |
// create allocator... |
|
375 |
const SRamInfo& info = *(const SRamInfo*)TheSuperPage().iRamBootData; |
|
376 |
iRamPageAllocator = DRamAllocator::New(info, iRamZones, iRamZoneCallback); |
|
377 |
||
378 |
// initialise all pages in banks as unused... |
|
379 |
const SRamBank* bank = info.iBanks; |
|
380 |
while(bank->iSize) |
|
381 |
{ |
|
382 |
TUint32 base = bank->iBase; |
|
383 |
TUint32 size = bank->iSize; |
|
384 |
__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Found RAM bank 0x%08x size %d",base,size)); |
|
385 |
if(base+size<=base || ((base|size)&KPageMask)) |
|
386 |
Panic(EInvalidRamBankAtBoot); |
|
387 |
||
388 |
SPageInfo* pi = SPageInfo::FromPhysAddr(base); |
|
389 |
SPageInfo* piEnd = pi+(size>>KPageShift); |
|
390 |
while(pi<piEnd) |
|
391 |
(pi++)->SetUnused(); |
|
392 |
++bank; |
|
393 |
} |
|
394 |
// step over the last bank to get to the reserved banks. |
|
395 |
++bank; |
|
396 |
// mark any reserved regions as allocated... |
|
397 |
while(bank->iSize) |
|
398 |
{ |
|
399 |
TUint32 base = bank->iBase; |
|
400 |
TUint32 size = bank->iSize; |
|
401 |
__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Found reserved bank 0x%08x size %d",base,size)); |
|
402 |
if(base+size<=base || ((base|size)&KPageMask)) |
|
403 |
Panic(EInvalidReservedBankAtBoot); |
|
404 |
||
405 |
SPageInfo* pi = SPageInfo::FromPhysAddr(base); |
|
406 |
SPageInfo* piEnd = pi+(size>>KPageShift); |
|
407 |
while(pi<piEnd) |
|
408 |
(pi++)->SetPhysAlloc(); |
|
409 |
++bank; |
|
410 |
} |
|
411 |
||
412 |
// Clear the inital (and only so far) page table info page so all unused |
|
413 |
// page tables infos will be marked as unused. |
|
414 |
__ASSERT_COMPILE(SPageTableInfo::EUnused == 0); |
|
415 |
memclr((TAny*)KPageTableInfoBase, KPageSize); |
|
416 |
||
417 |
// look for page tables - assume first page table maps page tables |
|
418 |
TPte* pPte = (TPte*)KPageTableBase; |
|
419 |
TInt i; |
|
420 |
for(i=0; i<KChunkSize/KPageSize; ++i) |
|
421 |
{ |
|
422 |
TPte pte = *pPte++; |
|
423 |
if(pte==KPteUnallocatedEntry) // after boot, page tables are contiguous |
|
424 |
break; |
|
425 |
TPhysAddr ptpgPhys = Mmu::PtePhysAddr(pte,i); |
|
426 |
__KTRACE_OPT(KBOOT,Kern::Printf("Page Table Group %08x -> Phys %08x", KPageTableBase+i*KPageSize, ptpgPhys)); |
|
427 |
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgPhys); |
|
428 |
__ASSERT_ALWAYS(pi, Panic(EInvalidPageTableAtBoot)); |
|
429 |
pi->SetFixed(i); // this also sets the SPageInfo::iOffset so that linear-to-physical works |
|
430 |
} |
|
431 |
||
432 |
// look for mapped pages |
|
433 |
TPde* pd = Mmu::PageDirectory(KKernelOsAsid); |
|
434 |
for(i=0; i<(1<<(32-KChunkShift)); ++i) |
|
435 |
{ |
|
436 |
TPde pde = pd[i]; |
|
437 |
if(pde==KPdeUnallocatedEntry) |
|
438 |
continue; |
|
439 |
TPhysAddr pdePhys = Mmu::PdePhysAddr(pde); |
|
440 |
TPte* pt = 0; |
|
441 |
if(pdePhys!=KPhysAddrInvalid) |
|
442 |
{ |
|
443 |
__KTRACE_OPT(KBOOT,Kern::Printf("Addr %08x -> Whole PDE Phys %08x", i<<KChunkShift, pdePhys)); |
|
444 |
} |
|
445 |
else |
|
446 |
{ |
|
447 |
pt = Mmu::PageTableFromPde(pde); |
|
448 |
__KTRACE_OPT(KBOOT,Kern::Printf("Addr %08x -> page table %08x", i<<KChunkShift, pt)); |
|
449 |
__ASSERT_ALWAYS(pt,Panic(EInvalidPdeAtBoot)); // bad PDE |
|
450 |
} |
|
451 |
||
452 |
TInt j; |
|
453 |
TInt np = 0; |
|
454 |
for(j=0; j<KChunkSize/KPageSize; ++j) |
|
455 |
{ |
|
456 |
TBool present = ETrue; // all pages present if whole PDE mapping |
|
457 |
TPte pte = 0; |
|
458 |
if(pt) |
|
459 |
{ |
|
460 |
pte = pt[j]; |
|
461 |
present = pte!=KPteUnallocatedEntry; |
|
462 |
} |
|
463 |
if(present) |
|
464 |
{ |
|
465 |
++np; |
|
466 |
TPhysAddr pa = pt ? Mmu::PtePhysAddr(pte,j) : (pdePhys + (j<<KPageShift)); |
|
467 |
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa); |
|
468 |
__KTRACE_OPT(KBOOT,Kern::Printf("Addr: %08x PA=%08x", |
|
469 |
(i<<KChunkShift)+(j<<KPageShift), pa)); |
|
470 |
if(pi) // ignore non-RAM mappings |
|
471 |
{ |
|
472 |
TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageFixed); |
|
473 |
// allow KErrAlreadyExists since it's possible that a page is doubly mapped |
|
474 |
__ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot)); |
|
475 |
if(pi->Type()==SPageInfo::EUnused) |
|
476 |
pi->SetFixed(); |
|
477 |
} |
|
478 |
} |
|
479 |
} |
|
480 |
__KTRACE_OPT(KBOOT,Kern::Printf("Addr: %08x #PTEs=%d",(i<<KChunkShift),np)); |
|
481 |
if(pt) |
|
482 |
{ |
|
483 |
SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt); |
|
484 |
pti->Boot(np); |
|
485 |
} |
|
486 |
} |
|
487 |
||
488 |
TInt r = K::MutexCreate(iRamAllocatorMutex, KLitRamAlloc, NULL, EFalse, KMutexOrdRamAlloc); |
|
489 |
if(r!=KErrNone) |
|
490 |
Panic(ERamAllocMutexCreateFailed); |
|
491 |
iRamAllocLockCount = 0; |
|
492 |
iRamAllocInitialFreePages = FreeRamInPages(); |
|
493 |
||
494 |
__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::DoInit2")); |
|
495 |
||
496 |
for(i=0; i<KNumTempMappingSlots; ++i) |
|
497 |
iTempMap[i].Alloc(1); |
|
498 |
||
499 |
iPhysMemSyncTemp.Alloc(1); |
|
500 |
r = K::MutexCreate(iPhysMemSyncMutex, KLitPhysMemSync, NULL, EFalse, KMutexOrdSyncPhysMem); |
|
501 |
if(r!=KErrNone) |
|
502 |
Panic(EPhysMemSyncMutexCreateFailed); |
|
503 |
// VerifyRam(); |
|
504 |
} |
|
505 |
||
506 |
||
507 |
void Mmu::Init2FinalCommon() |
|
508 |
{ |
|
509 |
__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2FinalCommon")); |
|
510 |
// hack, reduce free memory to <2GB... |
|
511 |
while(FreeRamInPages()>=0x80000000/KPageSize) |
|
512 |
{ |
|
513 |
TPhysAddr dummyPage; |
|
514 |
TInt r = iRamPageAllocator->AllocRamPages(&dummyPage,1, EPageFixed); |
|
515 |
__NK_ASSERT_ALWAYS(r==KErrNone); |
|
516 |
} |
|
517 |
// hack, reduce total RAM to <2GB... |
|
518 |
if(TheSuperPage().iTotalRamSize<0) |
|
519 |
TheSuperPage().iTotalRamSize = 0x80000000-KPageSize; |
|
520 |
||
521 |
// Save current free RAM size - there can never be more free RAM than this |
|
522 |
TUint maxFreePages = FreeRamInPages(); |
|
523 |
K::MaxFreeRam = maxFreePages*KPageSize; |
|
524 |
if(maxFreePages < (TUint(PP::RamDriveMaxSize)>>KPageShift)) |
|
525 |
PP::RamDriveMaxSize = maxFreePages*KPageSize; |
|
526 |
||
527 |
// update this to stop assert triggering in RamAllocLock::Lock() |
|
528 |
iRamAllocInitialFreePages = maxFreePages; |
|
529 |
} |
|
530 |
||
531 |
||
532 |
void Mmu::Init3() |
|
533 |
{ |
|
534 |
iDefrag = new Defrag; |
|
535 |
if (!iDefrag) |
|
536 |
Panic(EDefragAllocFailed); |
|
537 |
iDefrag->Init3(TheMmu.iRamPageAllocator); |
|
538 |
} |
|
539 |
||
540 |
// |
|
541 |
// Utils |
|
542 |
// |
|
543 |
||
544 |
void Mmu::Panic(TPanic aPanic) |
|
545 |
{ |
|
546 |
Kern::Fault("MMU",aPanic); |
|
547 |
} |
|
548 |
||
549 |
||
550 |
TUint Mmu::FreeRamInPages() |
|
551 |
{ |
|
552 |
return iRamPageAllocator->FreeRamInPages()+ThePager.NumberOfFreePages(); |
|
553 |
} |
|
554 |
||
555 |
||
556 |
TUint Mmu::TotalPhysicalRamPages() |
|
557 |
{ |
|
558 |
return iRamPageAllocator->TotalPhysicalRamPages(); |
|
559 |
} |
|
560 |
||
561 |
||
562 |
const SRamZone* Mmu::RamZoneConfig(TRamZoneCallback& aCallback) const |
|
563 |
{ |
|
564 |
aCallback = iRamZoneCallback; |
|
565 |
return iRamZones; |
|
566 |
} |
|
567 |
||
568 |
||
569 |
void Mmu::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback) |
|
570 |
{ |
|
571 |
iRamZones = aZones; |
|
572 |
iRamZoneCallback = aCallback; |
|
573 |
} |
|
574 |
||
575 |
||
576 |
TInt Mmu::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask) |
|
577 |
{ |
|
578 |
return iRamPageAllocator->ModifyZoneFlags(aId, aClearMask, aSetMask); |
|
579 |
} |
|
580 |
||
581 |
||
582 |
TInt Mmu::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData) |
|
583 |
{ |
|
584 |
return iRamPageAllocator->GetZonePageCount(aId, aPageData); |
|
585 |
} |
|
586 |
||
587 |
||
588 |
TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign) |
|
589 |
{ |
|
590 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?,%d)", aZoneIdCount, aBytes, aPhysAddr, aAlign)); |
|
591 |
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
592 |
||
593 |
TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, EPageFixed, aAlign); |
|
594 |
if(r!=KErrNone) |
|
595 |
iRamAllocFailed = ETrue; |
|
596 |
else |
|
597 |
{ |
|
598 |
TUint pages = MM::RoundToPageCount(aBytes); |
|
599 |
AllocatedPhysicalRam(aPhysAddr, pages, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered); |
|
600 |
} |
|
601 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr)); |
|
602 |
return r; |
|
603 |
} |
|
604 |
||
605 |
||
606 |
TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList) |
|
607 |
{ |
|
608 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?)", aZoneIdCount, aNumPages)); |
|
609 |
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
610 |
||
611 |
TInt r = iRamPageAllocator->ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, EPageFixed); |
|
612 |
if(r!=KErrNone) |
|
613 |
iRamAllocFailed = ETrue; |
|
614 |
else |
|
615 |
{ |
|
616 |
PagesAllocated(aPageList, aNumPages, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered); |
|
617 |
||
618 |
// update page infos... |
|
619 |
TUint flash = 0; |
|
620 |
TPhysAddr* pageEnd = aPageList + aNumPages; |
|
621 |
MmuLock::Lock(); |
|
622 |
TPhysAddr* page = aPageList; |
|
623 |
while (page < pageEnd) |
|
624 |
{ |
|
625 |
MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); |
|
626 |
TPhysAddr pagePhys = *page++; |
|
627 |
__NK_ASSERT_DEBUG(pagePhys != KPhysAddrInvalid); |
|
628 |
SPageInfo::FromPhysAddr(pagePhys)->SetPhysAlloc(); |
|
629 |
} |
|
630 |
MmuLock::Unlock(); |
|
631 |
} |
|
632 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d",r)); |
|
633 |
return r; |
|
634 |
} |
|
635 |
||
636 |
||
637 |
TInt Mmu::RamHalFunction(TInt aFunction, TAny* a1, TAny* a2) |
|
638 |
{ |
|
639 |
// This function should only be registered with hal and therefore can only |
|
640 |
// be invoked after the ram allocator has been created. |
|
641 |
__NK_ASSERT_DEBUG(iRamPageAllocator); |
|
642 |
return iRamPageAllocator->HalFunction(aFunction, a1, a2); |
|
643 |
} |
|
644 |
||
645 |
||
646 |
void Mmu::ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldPageType, TZonePageType aNewPageType) |
|
647 |
{ |
|
648 |
iRamPageAllocator->ChangePageType(aPageInfo, aOldPageType, aNewPageType); |
|
649 |
} |
|
650 |
||
651 |
TInt Mmu::HandlePageFault(TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions, TAny* aExceptionInfo) |
|
652 |
{ |
|
653 |
TRACE(("Mmu::HandlePageFault(0x%08x,0x%08x,%d)",aPc,aFaultAddress,aAccessPermissions)); |
|
654 |
||
655 |
DMemModelThread* thread = (DMemModelThread*)TheCurrentThread; |
|
656 |
// Get the os asid of the process taking the fault, no need to open a reference |
|
657 |
// as it is the current thread's process so can't be freed. |
|
658 |
TUint faultOsAsid = ((DMemModelProcess*)thread->iNThread.iAddressSpace)->OsAsid(); |
|
659 |
||
660 |
// check if any fast mutexes held... |
|
661 |
NFastMutex* fm = NKern::HeldFastMutex(); |
|
662 |
TPagingExcTrap* trap = thread->iPagingExcTrap; |
|
663 |
if(fm) |
|
664 |
{ |
|
665 |
// check there is an XTRAP_PAGING in effect... |
|
666 |
if(!trap) |
|
667 |
{ |
|
668 |
// oops, kill system... |
|
669 |
__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("Fault with FM Held! addr=0x%08x (%O pc=%x)",aFaultAddress,thread,aPc)); |
|
670 |
Exc::Fault(aExceptionInfo); |
|
671 |
} |
|
672 |
||
673 |
// release the fast mutex... |
|
674 |
NKern::FMSignal(fm); |
|
675 |
} |
|
676 |
||
677 |
NKern::ThreadEnterCS(); |
|
678 |
||
679 |
// work out address space for aFaultAddress... |
|
680 |
TUint osAsid = faultOsAsid; |
|
681 |
TLinAddr addr = aFaultAddress; |
|
682 |
if(thread->iAliasLinAddr && TUint(addr - thread->iAliasLinAddr) < TUint(KPageSize)) |
|
683 |
{ |
|
684 |
// Address in aliased memory... |
|
685 |
addr = (addr - thread->iAliasLinAddr) + thread->iAliasTarget; |
|
686 |
// Get the os asid of the process thread is aliasing, no need to open |
|
687 |
// a reference on it as one was already opened when the alias was created. |
|
688 |
osAsid = thread->iAliasProcess->OsAsid(); |
|
689 |
} |
|
690 |
else if(addr>=KGlobalMemoryBase) |
|
691 |
{ |
|
692 |
// Address in global region, so look it up in kernel's address space... |
|
693 |
osAsid = KKernelOsAsid; |
|
694 |
} |
|
695 |
||
696 |
// NOTE, osAsid will remain valid for duration of this function because it is either |
|
697 |
// - The current thread's address space, which can't go away whilst the thread |
|
698 |
// is running. |
|
699 |
// - The address space of another thread which we are aliasing memory from, |
|
700 |
// and we would only do this if we have a reference on this other thread, |
|
701 |
// which has a reference on it's process, which should own the address space! |
|
702 |
||
703 |
#ifdef __BROADCAST_CACHE_MAINTENANCE__ |
|
36
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
704 |
TInt aliasAsid = -1; |
0 | 705 |
if (thread->iAliasLinAddr) |
706 |
{ |
|
707 |
// If an alias is in effect, the the thread will be locked to the current CPU, |
|
708 |
// but we need to be able to migrate between CPUs for cache maintainance. This |
|
709 |
// must be dealt with by removing the alias and restoring it with a paging trap |
|
710 |
// handler. |
|
711 |
if(!trap) |
|
712 |
{ |
|
713 |
// oops, kill system... |
|
714 |
__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("Fault with thread locked to current CPU! addr=0x%08x (%O pc=%x)",aFaultAddress,thread,aPc)); |
|
715 |
Exc::Fault(aExceptionInfo); |
|
716 |
} |
|
36
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
717 |
// Open a reference on the aliased process's os asid before removing the alias |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
718 |
// so that the address space can't be freed while we try to access its members. |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
719 |
aliasAsid = thread->iAliasProcess->TryOpenOsAsid(); |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
720 |
// This should never fail as until we remove the alias there will |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
721 |
// always be at least one reference on the os asid. |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
722 |
__NK_ASSERT_DEBUG(aliasAsid >= 0); |
0 | 723 |
thread->RemoveAlias(); |
724 |
} |
|
725 |
#endif |
|
726 |
||
727 |
// find mapping... |
|
728 |
TUint offsetInMapping; |
|
729 |
TUint mapInstanceCount; |
|
730 |
DMemoryMapping* mapping = MM::FindMappingInAddressSpace(osAsid, addr, 1, offsetInMapping, mapInstanceCount); |
|
731 |
// TRACE(("%O mapping=0x%08x",TheCurrentThread,mapping)); |
|
732 |
TInt r = KErrNotFound; |
|
733 |
||
734 |
if(mapping) |
|
735 |
{ |
|
736 |
// Pinning mappings should not be found from within an address space. |
|
737 |
__NK_ASSERT_DEBUG(!mapping->IsPinned()); |
|
738 |
MmuLock::Lock(); |
|
739 |
||
740 |
// check if we need to process page fault... |
|
741 |
if(!Mmu::CheckPteTypePermissions(mapping->PteType(),aAccessPermissions) || |
|
742 |
mapInstanceCount != mapping->MapInstanceCount()) |
|
743 |
{ |
|
744 |
// Invalid access to the page. |
|
745 |
MmuLock::Unlock(); |
|
746 |
r = KErrAbort; |
|
747 |
} |
|
748 |
else |
|
749 |
{ |
|
750 |
// we do need to handle fault so is this a demand paging or page moving fault |
|
751 |
DMemoryObject* memory = mapping->Memory(); |
|
752 |
if(!memory) |
|
753 |
MmuLock::Unlock(); |
|
754 |
else |
|
755 |
{ |
|
756 |
TUint faultIndex = (offsetInMapping >> KPageShift) + mapping->iStartIndex; |
|
757 |
memory->Open(); |
|
758 |
||
759 |
// This is safe as we have the instance count so can detect the mapping |
|
760 |
// being reused and we have a reference to the memory object so it can't |
|
761 |
// be deleted. |
|
762 |
MmuLock::Unlock(); |
|
763 |
||
764 |
if(memory->IsDemandPaged()) |
|
765 |
{ |
|
766 |
// Let the pager handle the fault... |
|
767 |
r = ThePager.HandlePageFault( aPc, aFaultAddress, faultOsAsid, faultIndex, |
|
768 |
aAccessPermissions, memory, mapping, mapInstanceCount, |
|
769 |
thread, aExceptionInfo); |
|
770 |
} |
|
771 |
else |
|
772 |
{// The page could be being moved so verify that with its manager. |
|
773 |
DMemoryManager* manager = memory->iManager; |
|
774 |
r = manager->HandleFault(memory, faultIndex, mapping, mapInstanceCount, aAccessPermissions); |
|
775 |
} |
|
776 |
if (r == KErrNone) |
|
777 |
{// alias PDE needs updating because page tables have changed... |
|
778 |
thread->RefreshAlias(); |
|
779 |
} |
|
780 |
memory->Close(); |
|
781 |
} |
|
782 |
} |
|
783 |
mapping->Close(); |
|
784 |
} |
|
785 |
||
786 |
if (trap) |
|
787 |
{ |
|
788 |
// restore address space (because the trap will bypass any code |
|
789 |
// which would have done this.)... |
|
790 |
DMemModelThread::RestoreAddressSpace(); |
|
791 |
} |
|
792 |
||
36
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
793 |
#ifdef __BROADCAST_CACHE_MAINTENANCE__ |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
794 |
// Close any reference on the aliased process's os asid before we leave the |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
795 |
// critical section. |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
796 |
if (aliasAsid >= 0) |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
797 |
{ |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
798 |
thread->iAliasProcess->CloseOsAsid(); |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
799 |
} |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
800 |
#endif |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
801 |
|
0 | 802 |
NKern::ThreadLeaveCS(); // thread will die now if CheckRealtimeThreadFault caused a panic |
803 |
||
804 |
// deal with XTRAP_PAGING... |
|
805 |
if(trap) |
|
806 |
{ |
|
807 |
// re-acquire any fast mutex which was held before the page fault... |
|
808 |
if(fm) |
|
809 |
NKern::FMWait(fm); |
|
810 |
if (r == KErrNone) |
|
811 |
{ |
|
812 |
trap->Exception(1); // return from exception trap with result '1' (value>0) |
|
813 |
// code doesn't continue beyond this point. |
|
814 |
__NK_ASSERT_DEBUG(0); |
|
815 |
} |
|
816 |
} |
|
817 |
||
818 |
return r; |
|
819 |
} |
|
820 |
||
821 |
||
822 |
// |
|
823 |
// Memory allocation |
|
824 |
// |
|
825 |
||
826 |
TInt Mmu::AllocRam( TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags, TZonePageType aZonePageType, |
|
827 |
TUint aBlockZoneId, TBool aBlockRest) |
|
828 |
{ |
|
829 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam(?,%d,%x)",aCount,aFlags)); |
|
830 |
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
831 |
#ifdef _DEBUG |
|
832 |
if(K::CheckForSimulatedAllocFail()) |
|
833 |
{ |
|
834 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns simulated OOM %d",KErrNoMemory)); |
|
835 |
return KErrNoMemory; |
|
836 |
} |
|
837 |
#endif |
|
838 |
TInt missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest); |
|
839 |
if(missing && !(aFlags&EAllocNoPagerReclaim) && ThePager.GetFreePages(missing)) |
|
840 |
missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest); |
|
841 |
TInt r = missing ? KErrNoMemory : KErrNone; |
|
842 |
if(r!=KErrNone) |
|
843 |
iRamAllocFailed = ETrue; |
|
844 |
else |
|
845 |
PagesAllocated(aPages,aCount,aFlags); |
|
846 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns %d",r)); |
|
847 |
return r; |
|
848 |
} |
|
849 |
||
850 |
||
851 |
void Mmu::FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType) |
|
852 |
{ |
|
853 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeRam(?,%d)",aCount)); |
|
854 |
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
855 |
||
856 |
// update page infos... |
|
857 |
TPhysAddr* pages = aPages; |
|
858 |
TPhysAddr* pagesEnd = pages+aCount; |
|
859 |
TPhysAddr* pagesOut = aPages; |
|
860 |
MmuLock::Lock(); |
|
861 |
TUint flash = 0; |
|
862 |
while(pages<pagesEnd) |
|
863 |
{ |
|
864 |
MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); |
|
865 |
TPhysAddr pagePhys = *pages++; |
|
866 |
__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid); |
|
867 |
SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys); |
|
868 |
PageFreed(pi); |
|
869 |
||
870 |
// If this is an old page of a page being moved that was previously pinned |
|
871 |
// then make sure it is freed as discardable otherwise despite DPager::DonatePages() |
|
872 |
// having marked it as discardable it would be freed as movable. |
|
873 |
__NK_ASSERT_DEBUG(pi->PagedState() != SPageInfo::EPagedPinnedMoved || aCount == 1); |
|
874 |
if (pi->PagedState() == SPageInfo::EPagedPinnedMoved) |
|
875 |
aZonePageType = EPageDiscard; |
|
876 |
||
877 |
if(ThePager.PageFreed(pi)==KErrNone) |
|
878 |
--aCount; // pager has dealt with this page, so one less for us |
|
879 |
else |
|
880 |
{ |
|
881 |
// All paged pages should have been dealt with by the pager above. |
|
882 |
__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged); |
|
883 |
*pagesOut++ = pagePhys; // store page address for freeing later |
|
884 |
} |
|
885 |
} |
|
886 |
MmuLock::Unlock(); |
|
887 |
||
888 |
iRamPageAllocator->FreeRamPages(aPages, aCount, aZonePageType); |
|
889 |
} |
|
890 |
||
891 |
||
892 |
TInt Mmu::AllocContiguousRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags) |
|
893 |
{ |
|
894 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam(?,0x%x,%d,%x)",aCount,aAlign,aFlags)); |
|
895 |
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
896 |
#ifdef _DEBUG |
|
897 |
if(K::CheckForSimulatedAllocFail()) |
|
898 |
{ |
|
899 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns simulated OOM %d",KErrNoMemory)); |
|
900 |
return KErrNoMemory; |
|
901 |
} |
|
902 |
// Only the page sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram. |
|
903 |
__NK_ASSERT_DEBUG(!(aFlags&EAllocNoPagerReclaim)); |
|
904 |
#endif |
|
905 |
TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift); |
|
906 |
if(r==KErrNoMemory && aCount > KMaxFreeableContiguousPages) |
|
907 |
{ |
|
908 |
// flush paging cache and retry... |
|
909 |
ThePager.FlushAll(); |
|
910 |
r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift); |
|
911 |
} |
|
912 |
if(r!=KErrNone) |
|
913 |
iRamAllocFailed = ETrue; |
|
914 |
else |
|
915 |
PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags); |
|
916 |
__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguouseRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr)); |
|
917 |
return r; |
|
918 |
} |
|
919 |
||
920 |
||
921 |
void Mmu::FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount) |
|
922 |
{ |
|
923 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeContiguousRam(0x%08x,0x%x)",aPhysAddr,aCount)); |
|
924 |
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
925 |
__NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0); |
|
926 |
||
927 |
TUint pageCount = aCount; |
|
928 |
||
929 |
// update page infos... |
|
930 |
SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr); |
|
931 |
SPageInfo* piEnd = pi+pageCount; |
|
932 |
TUint flash = 0; |
|
933 |
MmuLock::Lock(); |
|
934 |
while(pi<piEnd) |
|
935 |
{ |
|
936 |
MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo); |
|
937 |
PageFreed(pi++); |
|
938 |
} |
|
939 |
MmuLock::Unlock(); |
|
940 |
||
941 |
// free pages... |
|
942 |
while(pageCount) |
|
943 |
{ |
|
944 |
iRamPageAllocator->FreeRamPage(aPhysAddr, EPageFixed); |
|
945 |
aPhysAddr += KPageSize; |
|
946 |
--pageCount; |
|
947 |
} |
|
948 |
} |
|
949 |
||
950 |
||
951 |
TInt Mmu::AllocPhysicalRam(TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags) |
|
952 |
{ |
|
953 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,%d,%x)",aCount,aFlags)); |
|
954 |
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
955 |
// Allocate fixed pages as physically allocated pages aren't movable or discardable. |
|
956 |
TInt r = AllocRam(aPages, aCount, aFlags, EPageFixed); |
|
957 |
if (r!=KErrNone) |
|
958 |
return r; |
|
959 |
||
960 |
// update page infos... |
|
961 |
TPhysAddr* pages = aPages; |
|
962 |
TPhysAddr* pagesEnd = pages+aCount; |
|
963 |
MmuLock::Lock(); |
|
964 |
TUint flash = 0; |
|
965 |
while(pages<pagesEnd) |
|
966 |
{ |
|
967 |
MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); |
|
968 |
TPhysAddr pagePhys = *pages++; |
|
969 |
__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid); |
|
970 |
SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys); |
|
971 |
pi->SetPhysAlloc(); |
|
972 |
} |
|
973 |
MmuLock::Unlock(); |
|
974 |
||
975 |
return KErrNone; |
|
976 |
} |
|
977 |
||
978 |
||
979 |
void Mmu::FreePhysicalRam(TPhysAddr* aPages, TUint aCount) |
|
980 |
{ |
|
981 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(?,%d)",aCount)); |
|
982 |
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld()); |
|
983 |
||
984 |
// update page infos... |
|
985 |
TPhysAddr* pages = aPages; |
|
986 |
TPhysAddr* pagesEnd = pages+aCount; |
|
987 |
MmuLock::Lock(); |
|
988 |
TUint flash = 0; |
|
989 |
while(pages<pagesEnd) |
|
990 |
{ |
|
991 |
MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); |
|
992 |
TPhysAddr pagePhys = *pages++; |
|
993 |
__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid); |
|
994 |
SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys); |
|
995 |
__ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam)); |
|
996 |
__ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam)); |
|
997 |
pi->SetUnused(); |
|
998 |
} |
|
999 |
MmuLock::Unlock(); |
|
1000 |
||
1001 |
iRamPageAllocator->FreeRamPages(aPages,aCount, EPageFixed); |
|
1002 |
} |
|
1003 |
||
1004 |
||
1005 |
TInt Mmu::AllocPhysicalRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags) |
|
1006 |
{ |
|
1007 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,0x%x,d,%x)",aCount,aAlign,aFlags)); |
|
1008 |
TInt r = AllocContiguousRam(aPhysAddr,aCount,aAlign,aFlags); |
|
1009 |
if (r!=KErrNone) |
|
1010 |
return r; |
|
1011 |
||
1012 |
// update page infos... |
|
1013 |
SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr); |
|
1014 |
SPageInfo* piEnd = pi+aCount; |
|
1015 |
TUint flash = 0; |
|
1016 |
MmuLock::Lock(); |
|
1017 |
while(pi<piEnd) |
|
1018 |
{ |
|
1019 |
MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo); |
|
1020 |
pi->SetPhysAlloc(); |
|
1021 |
++pi; |
|
1022 |
} |
|
1023 |
MmuLock::Unlock(); |
|
1024 |
||
1025 |
return KErrNone; |
|
1026 |
} |
|
1027 |
||
1028 |
||
1029 |
void Mmu::FreePhysicalRam(TPhysAddr aPhysAddr, TUint aCount) |
|
1030 |
{ |
|
1031 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(0x%08x,0x%x)",aPhysAddr,aCount)); |
|
1032 |
||
1033 |
// update page infos... |
|
1034 |
SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr); |
|
1035 |
SPageInfo* piEnd = pi+aCount; |
|
1036 |
TUint flash = 0; |
|
1037 |
MmuLock::Lock(); |
|
1038 |
while(pi<piEnd) |
|
1039 |
{ |
|
1040 |
MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo); |
|
1041 |
__ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam)); |
|
1042 |
__ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam)); |
|
1043 |
pi->SetUnused(); |
|
1044 |
++pi; |
|
1045 |
} |
|
1046 |
MmuLock::Unlock(); |
|
1047 |
||
1048 |
iRamPageAllocator->FreePhysicalRam(aPhysAddr, aCount << KPageShift); |
|
1049 |
} |
|
1050 |
||
1051 |
||
1052 |
TInt Mmu::ClaimPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags) |
|
1053 |
{ |
|
1054 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(0x%08x,0x%x,0x%08x)",aPhysAddr,aCount,aFlags)); |
|
1055 |
aPhysAddr &= ~KPageMask; |
|
31
56f325a607ea
Revision: 200951
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
0
diff
changeset
|
1056 |
TInt r = iRamPageAllocator->ClaimPhysicalRam(aPhysAddr,(aCount << KPageShift)); |
0 | 1057 |
if(r!=KErrNone) |
1058 |
return r; |
|
1059 |
||
1060 |
PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags); |
|
1061 |
||
1062 |
// update page infos... |
|
1063 |
SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr); |
|
1064 |
SPageInfo* piEnd = pi+aCount; |
|
1065 |
TUint flash = 0; |
|
1066 |
MmuLock::Lock(); |
|
1067 |
while(pi<piEnd) |
|
1068 |
{ |
|
1069 |
MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo); |
|
1070 |
pi->SetPhysAlloc(); |
|
1071 |
++pi; |
|
1072 |
} |
|
1073 |
MmuLock::Unlock(); |
|
1074 |
||
1075 |
return KErrNone; |
|
1076 |
} |
|
1077 |
||
1078 |
||
1079 |
void Mmu::AllocatedPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags) |
|
1080 |
{ |
|
1081 |
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocatedPhysicalRam(0x%08x,0x%x,d,%x)",aPhysAddr,aCount,aFlags)); |
|
1082 |
||
1083 |
PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags); |
|
1084 |
||
1085 |
// update page infos... |
|
1086 |
SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr); |
|
1087 |
SPageInfo* piEnd = pi+aCount; |
|
1088 |
TUint flash = 0; |
|
1089 |
MmuLock::Lock(); |
|
1090 |
while(pi<piEnd) |
|
1091 |
{ |
|
1092 |
MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo); |
|
1093 |
pi->SetPhysAlloc(); |
|
1094 |
++pi; |
|
1095 |
} |
|
1096 |
MmuLock::Unlock(); |
|
1097 |
} |
|
1098 |
||
1099 |
||
1100 |
// |
|
1101 |
// Misc |
|
1102 |
// |
|
1103 |
||
1104 |
#ifdef _DEBUG |
|
1105 |
/** |
|
1106 |
Perform a page table walk to return the physical address of |
|
1107 |
the memory mapped at virtual address \a aLinAddr in the |
|
1108 |
address space \a aOsAsid. |
|
1109 |
||
1110 |
If the page table used was not one allocated by the kernel |
|
1111 |
then the results are unpredictable and may cause a system fault. |
|
1112 |
||
1113 |
@pre #MmuLock held. |
|
1114 |
*/ |
|
1115 |
TPhysAddr Mmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid) |
|
1116 |
{ |
|
1117 |
__NK_ASSERT_DEBUG(MmuLock::IsHeld() || K::Initialising); |
|
1118 |
return UncheckedLinearToPhysical(aLinAddr,aOsAsid); |
|
1119 |
} |
|
1120 |
#endif |
|
1121 |
||
1122 |
||
1123 |
/** |
|
1124 |
Next virtual address available for allocation by TTempMapping. |
|
1125 |
This is initialised to #KTempAddr and addresses may be allocated |
|
1126 |
until they reach #KTempAddrEnd. |
|
1127 |
*/ |
|
1128 |
TLinAddr Mmu::TTempMapping::iNextLinAddr = KTempAddr; |
|
1129 |
||
1130 |
||
1131 |
/** |
|
1132 |
Allocate virtual address space required to map a given number of memory pages. |
|
1133 |
||
1134 |
The actual size of allocated virtual allocated needs to accommodate \a aNumPages |
|
1135 |
number of pages of any colour. For example: if \a aNumPages == 4 and #KPageColourCount == 4, |
|
1136 |
then at least 7 pages are required. |
|
1137 |
||
1138 |
@param aNumPages Maximum number of pages that can be mapped into this temporary mapping. |
|
1139 |
||
1140 |
@pre Called in single threaded content (boot) only. |
|
1141 |
||
1142 |
@pre #iNextLinAddr points to virtual page with zero colour. |
|
1143 |
@post #iNextLinAddr points to virtual page with zero colour. |
|
1144 |
*/ |
|
1145 |
void Mmu::TTempMapping::Alloc(TUint aNumPages) |
|
1146 |
{ |
|
1147 |
__NK_ASSERT_DEBUG(aNumPages<=(KTempAddrEnd-KTempAddr)/KPageSize); |
|
1148 |
||
1149 |
// This runs during the boot only (single threaded context) so the access to iNextLinAddr is not guarded by any mutex. |
|
1150 |
TLinAddr tempAddr = iNextLinAddr; |
|
1151 |
TUint numPages = (KPageColourMask+aNumPages+KPageColourMask)&~KPageColourMask; |
|
1152 |
iNextLinAddr = tempAddr+numPages*KPageSize; |
|
1153 |
||
1154 |
__NK_ASSERT_ALWAYS(iNextLinAddr<=KTempAddrEnd); |
|
1155 |
||
1156 |
__NK_ASSERT_DEBUG(iSize==0); |
|
1157 |
iLinAddr = tempAddr; |
|
1158 |
MmuLock::Lock(); |
|
1159 |
iPtePtr = Mmu::PtePtrFromLinAddr(tempAddr,KKernelOsAsid); |
|
1160 |
__NK_ASSERT_DEBUG(iPtePtr); |
|
1161 |
MmuLock::Unlock(); |
|
1162 |
iBlankPte = TheMmu.iTempPteCached; |
|
1163 |
iSize = aNumPages; |
|
1164 |
iCount = 0; |
|
1165 |
||
1166 |
TRACEB(("Mmu::TTempMapping::Alloc(%d) iLinAddr=0x%08x, iPtePtr=0x%08x",aNumPages,iLinAddr,iPtePtr)); |
|
1167 |
} |
|
1168 |
||
1169 |
||
1170 |
/** |
|
1171 |
Map a single physical page into this temporary mapping. |
|
1172 |
||
1173 |
Supervisor read/write access and EMemoryAttributeStandard memory attributes apply. |
|
1174 |
||
1175 |
@param aPage The physical page to map. |
|
1176 |
@param aColour The required colour for the mapping. |
|
1177 |
||
1178 |
@return The linear address at which the page is mapped. |
|
1179 |
*/ |
|
1180 |
TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour) |
|
1181 |
{ |
|
1182 |
__NK_ASSERT_DEBUG(iSize>=1); |
|
1183 |
__NK_ASSERT_DEBUG(iCount==0); |
|
1184 |
||
1185 |
TUint colour = aColour&KPageColourMask; |
|
1186 |
TLinAddr addr = iLinAddr+(colour<<KPageShift); |
|
1187 |
TPte* pPte = iPtePtr+colour; |
|
1188 |
iColour = colour; |
|
1189 |
||
1190 |
__ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse)); |
|
1191 |
*pPte = (aPage&~KPageMask) | iBlankPte; |
|
1192 |
CacheMaintenance::SinglePteUpdated((TLinAddr)pPte); |
|
1193 |
InvalidateTLBForPage(addr|KKernelOsAsid); |
|
1194 |
||
1195 |
iCount = 1; |
|
1196 |
return addr; |
|
1197 |
} |
|
1198 |
||
1199 |
/** |
|
1200 |
Map a single physical page into this temporary mapping using the given page table entry (PTE) value. |
|
1201 |
||
1202 |
@param aPage The physical page to map. |
|
1203 |
@param aColour The required colour for the mapping. |
|
1204 |
@param aBlankPte The PTE value to use for mapping the page, |
|
1205 |
with the physical address component equal to zero. |
|
1206 |
||
1207 |
@return The linear address at which the page is mapped. |
|
1208 |
*/ |
|
1209 |
TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour, TPte aBlankPte) |
|
1210 |
{ |
|
1211 |
__NK_ASSERT_DEBUG(iSize>=1); |
|
1212 |
__NK_ASSERT_DEBUG(iCount==0); |
|
1213 |
||
1214 |
TUint colour = aColour&KPageColourMask; |
|
1215 |
TLinAddr addr = iLinAddr+(colour<<KPageShift); |
|
1216 |
TPte* pPte = iPtePtr+colour; |
|
1217 |
iColour = colour; |
|
1218 |
||
1219 |
__ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse)); |
|
1220 |
*pPte = (aPage&~KPageMask) | aBlankPte; |
|
1221 |
CacheMaintenance::SinglePteUpdated((TLinAddr)pPte); |
|
1222 |
InvalidateTLBForPage(addr|KKernelOsAsid); |
|
1223 |
||
1224 |
iCount = 1; |
|
1225 |
return addr; |
|
1226 |
} |
|
1227 |
||
1228 |
||
1229 |
/** |
|
1230 |
Map a number of physical pages into this temporary mapping. |
|
1231 |
||
1232 |
Supervisor read/write access and EMemoryAttributeStandard memory attributes apply. |
|
1233 |
||
1234 |
@param aPages The array of physical pages to map. |
|
1235 |
@param aCount The number of pages to map. |
|
1236 |
@param aColour The required colour for the first page. |
|
1237 |
Consecutive pages will be coloured accordingly. |
|
1238 |
||
1239 |
@return The linear address at which the first page is mapped. |
|
1240 |
*/ |
|
1241 |
TLinAddr Mmu::TTempMapping::Map(TPhysAddr* aPages, TUint aCount, TUint aColour) |
|
1242 |
{ |
|
1243 |
__NK_ASSERT_DEBUG(iSize>=aCount); |
|
1244 |
__NK_ASSERT_DEBUG(iCount==0); |
|
1245 |
||
1246 |
TUint colour = aColour&KPageColourMask; |
|
1247 |
TLinAddr addr = iLinAddr+(colour<<KPageShift); |
|
1248 |
TPte* pPte = iPtePtr+colour; |
|
1249 |
iColour = colour; |
|
1250 |
||
1251 |
for(TUint i=0; i<aCount; ++i) |
|
1252 |
{ |
|
1253 |
__ASSERT_DEBUG(pPte[i]==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse)); |
|
1254 |
pPte[i] = (aPages[i]&~KPageMask) | iBlankPte; |
|
1255 |
CacheMaintenance::SinglePteUpdated((TLinAddr)&pPte[i]); |
|
1256 |
InvalidateTLBForPage((addr+i*KPageSize)|KKernelOsAsid); |
|
1257 |
} |
|
1258 |
||
1259 |
iCount = aCount; |
|
1260 |
return addr; |
|
1261 |
} |
|
1262 |
||
1263 |
||
1264 |
/** |
|
1265 |
Unmap all pages from this temporary mapping. |
|
1266 |
||
1267 |
@param aIMBRequired True if IMB barrier is required prior unmapping. |
|
1268 |
*/ |
|
1269 |
void Mmu::TTempMapping::Unmap(TBool aIMBRequired) |
|
1270 |
{ |
|
1271 |
__NK_ASSERT_DEBUG(iSize>=1); |
|
1272 |
if(aIMBRequired) |
|
1273 |
CacheMaintenance::CodeChanged(iLinAddr+iColour*KPageSize,iCount*KPageSize); |
|
1274 |
Unmap(); |
|
1275 |
} |
|
1276 |
||
1277 |
||
1278 |
/** |
|
1279 |
Unmap all pages from this temporary mapping. |
|
1280 |
*/ |
|
1281 |
void Mmu::TTempMapping::Unmap() |
|
1282 |
{ |
|
1283 |
__NK_ASSERT_DEBUG(iSize>=1); |
|
1284 |
||
1285 |
TUint colour = iColour; |
|
1286 |
TLinAddr addr = iLinAddr+(colour<<KPageShift); |
|
1287 |
TPte* pPte = iPtePtr+colour; |
|
1288 |
TUint count = iCount; |
|
1289 |
||
1290 |
while(count) |
|
1291 |
{ |
|
1292 |
*pPte = KPteUnallocatedEntry; |
|
1293 |
CacheMaintenance::SinglePteUpdated((TLinAddr)pPte); |
|
1294 |
InvalidateTLBForPage(addr|KKernelOsAsid); |
|
1295 |
addr += KPageSize; |
|
1296 |
++pPte; |
|
1297 |
--count; |
|
1298 |
} |
|
1299 |
||
1300 |
iCount = 0; |
|
1301 |
} |
|
1302 |
||
36
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1303 |
#ifdef __SMP__ |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1304 |
/** |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1305 |
Dummy IPI to be invoked when a thread's alias pde members are updated remotely |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1306 |
by another thread. |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1307 |
|
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1308 |
@internalComponent |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1309 |
*/ |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1310 |
class TAliasIPI : public TGenericIPI |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1311 |
{ |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1312 |
public: |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1313 |
static void RefreshIsr(TGenericIPI*); |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1314 |
void RefreshAlias(); |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1315 |
}; |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1316 |
|
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1317 |
|
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1318 |
/** |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1319 |
Dummy isr method. |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1320 |
*/ |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1321 |
void TAliasIPI::RefreshIsr(TGenericIPI*) |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1322 |
{ |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1323 |
TRACE2(("TAliasIPI")); |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1324 |
} |
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31


/**
Queue the dummy IPI on all other processors. This ensures that DoProcessSwitch will
have completed updating iAliasPdePtr once this method returns.
*/
void TAliasIPI::RefreshAlias()
    {
    NKern::Lock();
    QueueAllOther(&RefreshIsr);
    NKern::Unlock();
    WaitCompletion();
    }


/**
Perform a dummy IPI on all the other processors to ensure that, if any of them are
executing DoProcessSwitch, they will either see the new value of iAliasPde before they
update iAliasPdePtr or will finish updating iAliasPdePtr before we continue.
This works because DoProcessSwitch() has interrupts disabled while reading iAliasPde
and updating iAliasPdePtr.
*/
void BroadcastAliasRefresh()
    {
    TAliasIPI ipi;
    ipi.RefreshAlias();
    }

#endif //__SMP__

/**
Remove any thread IPC aliases which use the specified page table.
This is used by the page table allocator when a page table is freed.

@pre #PageTablesLockIsHeld
*/
void Mmu::RemoveAliasesForPageTable(TPhysAddr aPageTable)
    {
    __NK_ASSERT_DEBUG(PageTablesLockIsHeld());

    MmuLock::Lock();

    SDblQue checkedList;

    TUint ptId = aPageTable>>KPageTableShift;
    while(!iAliasList.IsEmpty())
        {
        SDblQueLink* next = iAliasList.First()->Deque();
        checkedList.Add(next);
        DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
        if((thread->iAliasPde>>KPageTableShift)==ptId)
            {
            // the page table is being aliased by the thread, so remove it...
            TRACE2(("Thread %O RemoveAliasesForPageTable", this));
            thread->iAliasPde = KPdeUnallocatedEntry;
#ifdef __SMP__ // we also need to unmap the page table in case the thread is running on another core...

            // Ensure other processors see the update to iAliasPde.
            BroadcastAliasRefresh();

            *thread->iAliasPdePtr = KPdeUnallocatedEntry;

            SinglePdeUpdated(thread->iAliasPdePtr);
            __NK_ASSERT_DEBUG((thread->iAliasLinAddr&KPageMask)==0);
            // Invalidate the TLB for the page using the os asid of the process that created the alias.
            // This is safe as the os asid will be valid: the thread must be running, otherwise the alias
            // would have been removed.
            InvalidateTLBForPage(thread->iAliasLinAddr | ((DMemModelProcess*)thread->iOwningProcess)->OsAsid());
            // Note, a race condition with 'thread' updating its iAliasLinAddr is
            // not a problem because 'thread' will not then be accessing the aliased
            // region and will take care of invalidating the TLB.
#endif
            }
        MmuLock::Flash();
        }

    // copy checkedList back to iAliasList
    iAliasList.MoveFrom(&checkedList);

    MmuLock::Unlock();
    }


void DMemModelThread::RefreshAlias()
    {
    if(iAliasLinAddr)
        {
        TRACE2(("Thread %O RefreshAlias", this));
        // Get the os asid; this is the current thread so there is no need to open a reference.
        TUint thisAsid = ((DMemModelProcess*)iOwningProcess)->OsAsid();
        MmuLock::Lock();
        TInt osAsid = iAliasProcess->OsAsid();
        TPde pde = *Mmu::PageDirectoryEntry(osAsid,iAliasTarget);
        iAliasPde = pde;
        *iAliasPdePtr = pde;
        SinglePdeUpdated(iAliasPdePtr);
        InvalidateTLBForPage(iAliasLinAddr|thisAsid);
        MmuLock::Unlock();
        }
    }



//
// Mapping/unmapping functions
//


/**
Modify page table entries (PTEs) so they map the given memory pages.
Entries are only updated if the current state of the corresponding page
is RPageArray::ECommitted.

@param aPtePtr      Pointer into a page table for the PTE of the first page.
@param aCount       The number of pages to modify.
@param aPages       Pointer to the entry for the first page in a memory object's #RPageArray.
                    Each entry contains the physical address of a page together with its
                    current state (RPageArray::TState).
@param aBlankPte    The value to use for each PTE, with the physical address component equal
                    to zero.

@return False, if the page table no longer maps any entries and may be freed.
        True otherwise, to indicate that the page table is still needed.

@pre #MmuLock held.
@post #MmuLock held and has not been released by this function.
*/
TBool Mmu::MapPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aCount);
    __NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);

    TUint count = 0;
    if(aCount==1)
        {
        // get page to map...
        TPhysAddr pagePhys = *aPages;
        TPte pte = *aPtePtr;
        if(!RPageArray::TargetStateIsCommitted(pagePhys))
            goto done; // page no longer needs mapping

        // clear type flags...
        pagePhys &= ~KPageMask;

        // check nobody has already mapped the page...
        if(pte!=KPteUnallocatedEntry)
            {
            // already mapped...
#ifdef _DEBUG
            if((pte^pagePhys)>=TPte(KPageSize))
                {
                // but different!
                Kern::Printf("Mmu::MapPages already mapped %x->%x",pagePhys,pte);
                __NK_ASSERT_DEBUG(0);
                }
#endif
            return true; // return true to keep the page table (it already had at least this page mapped)
            }

        // map page...
        pte = pagePhys|aBlankPte;
        TRACE2(("!PTE %x=%x",aPtePtr,pte));
        *aPtePtr = pte;
        count = 1;

        // clean cache...
        CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
        }
    else
        {
        // check we are only updating a single page table...
        __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);

        // map pages...
        TPte* pPte = aPtePtr;
        TPte* pPteEnd = aPtePtr+aCount;
        do
            {
            // map page...
            TPhysAddr pagePhys = *aPages++;
            TPte pte = *pPte++;
            if(RPageArray::TargetStateIsCommitted(pagePhys))
                {
                // clear type flags...
                pagePhys &= ~KPageMask;

                // page not being freed, so try and map it...
                if(pte!=KPteUnallocatedEntry)
                    {
                    // already mapped...
#ifdef _DEBUG
                    if((pte^pagePhys)>=TPte(KPageSize))
                        {
                        // but different!
                        Kern::Printf("Mmu::MapPages already mapped %x->%x",pagePhys,pte);
                        __NK_ASSERT_DEBUG(0);
                        }
#endif
                    }
                else
                    {
                    // map page...
                    pte = pagePhys|aBlankPte;
                    TRACE2(("!PTE %x=%x",pPte-1,pte));
                    pPte[-1] = pte;
                    ++count;
                    }
                }
            }
        while(pPte!=pPteEnd);

        // clean cache...
        CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
        }

done:
    // update page counts...
    SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
    count = pti->IncPageCount(count);
    TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,pti->PageCount()));
    __NK_ASSERT_DEBUG(pti->CheckPageCount());

    // see if page table needs freeing...
    TUint keepPt = count | pti->PermanenceCount();

    __NK_ASSERT_DEBUG(!pti->IsDemandPaged()); // check not demand paged page table

    return keepPt;
    }
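
/*
Illustrative sketch (not part of the original source): a typical caller of
Mmu::MapPages() already holds the MmuLock and supplies the page array entries
for the region together with a blank PTE whose physical-address bits are zero.
The locals 'ptePtr', 'n', 'pageArrayEntries' and 'blankPte' are hypothetical,
shown only to make the calling convention concrete:

    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    TBool keepPt = TheMmu.MapPages(ptePtr, n, pageArrayEntries, blankPte);
    if(!keepPt)
        {
        // the page table maps nothing and the caller may arrange for it to be freed
        }
*/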


/**
Modify a page table entry (PTE) so it maps a new page.
The entry is only updated if the current state of the corresponding page
is RPageArray::ECommitted or RPageArray::EMoving.

@param aPtePtr      Pointer into a page table for the PTE of the page.
@param aPage        Pointer to the entry for the page in a memory object's #RPageArray.
                    The entry contains the physical address of a page together with its
                    current state (RPageArray::TState).
@param aBlankPte    The value to use for the PTE, with the physical address component equal
                    to zero.

@pre #MmuLock held.
@post #MmuLock held and has not been released by this function.
*/
void Mmu::RemapPage(TPte* const aPtePtr, TPhysAddr& aPage, TPte aBlankPte)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);

    // get page to remap...
    TPhysAddr pagePhys = aPage;

    // Only remap the page if it is committed or it is being moved and
    // no other operation has been performed on the page.
    if(!RPageArray::TargetStateIsCommitted(pagePhys))
        return; // page no longer needs mapping

    // Only remap the page if it is currently mapped, i.e. doesn't have an unallocated pte.
    // This will only be true if a new mapping is being added but it hasn't yet updated
    // all the ptes for the pages that it maps.
    TPte pte = *aPtePtr;
    if (pte == KPteUnallocatedEntry)
        return;

    // clear type flags...
    pagePhys &= ~KPageMask;

    SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
    if (pi)
        {
        SPageInfo::TPagedState pagedState = pi->PagedState();
        if (pagedState != SPageInfo::EUnpaged)
            {
            // The page is demand paged. Only remap the page if it is pinned or is currently
            // accessible but to the old physical page.
            if (pagedState != SPageInfo::EPagedPinned &&
                (Mmu::IsPteInaccessible(pte) || (pte^pagePhys) < TPte(KPageSize)))
                return;
            if (!pi->IsDirty())
                {
                // Ensure that the page is mapped as read only to prevent pages being marked dirty
                // by page moving despite not having been written to.
                Mmu::MakePteInaccessible(aBlankPte, EFalse);
                }
            }
        }

    // Map the page in the page array entry as this is always the physical
    // page that the memory object's page should be mapped to.
    pte = pagePhys|aBlankPte;
    TRACE2(("!PTE %x=%x",aPtePtr,pte));
    *aPtePtr = pte;

    // clean cache...
    CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
    }
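
/*
Illustrative sketch (not part of the original source): RemapPage() takes a
reference to a single page array entry, so code that has just substituted a
new physical page for an old one might, while holding the MmuLock, point the
affected PTE at the entry's new address. 'ptePtr', 'pageArrayEntry' and
'blankPte' are hypothetical names:

    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    TheMmu.RemapPage(ptePtr, pageArrayEntry, blankPte);
*/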


/**
Modify page table entries (PTEs) so they no longer map any memory pages.

@param aPtePtr      Pointer into a page table for the PTE of the first page.
@param aCount       The number of pages to modify.

@return False, if the page table no longer maps any entries and may be freed.
        True otherwise, to indicate that the page table is still needed.

@pre #MmuLock held.
@post #MmuLock held and has not been released by this function.
*/
TBool Mmu::UnmapPages(TPte* const aPtePtr, TUint aCount)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aCount);

    TUint count = 0;
    if(aCount==1)
        {
        if(*aPtePtr==KPteUnallocatedEntry)
            return true; // page already unmapped

        // unmap page...
        ++count;
        TPte pte = KPteUnallocatedEntry;
        TRACE2(("!PTE %x=%x",aPtePtr,pte));
        *aPtePtr = pte;

        // clean cache...
        CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
        }
    else
        {
        // check we are only updating a single page table...
        __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);

        // unmap pages...
        TPte* pPte = aPtePtr;
        TPte* pPteEnd = aPtePtr+aCount;
        do
            {
            if(*pPte!=KPteUnallocatedEntry)
                {
                // unmap page...
                ++count;
                TPte pte = KPteUnallocatedEntry;
                TRACE2(("!PTE %x=%x",pPte,pte));
                *pPte = pte;
                }
            }
        while(++pPte<pPteEnd);

        if(!count)
            return true; // no PTEs changed, so nothing more to do

        // clean cache...
        CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
        }

    // update page table info...
    SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
    count = pti->DecPageCount(count);
    TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,count));
    __NK_ASSERT_DEBUG(pti->CheckPageCount());

    // see if page table needs freeing...
    TUint keepPt = count | pti->PermanenceCount();

    return keepPt;
    }
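
/*
Illustrative sketch (not part of the original source): this overload clears
the PTEs unconditionally, so a caller tearing down a whole mapping can use
the return value to decide whether the page table can be released.
'ptePtr' and 'n' are hypothetical:

    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    if(!TheMmu.UnmapPages(ptePtr, n))
        {
        // no pages remain mapped by this page table; the caller may free it
        }
*/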


/**
Modify page table entries (PTEs) so they no longer map the given memory pages.
Entries are only updated if the current state of the corresponding page
is 'decommitted' i.e. RPageArray::TargetStateIsDecommitted returns true.

@param aPtePtr      Pointer into a page table for the PTE of the first page.
@param aCount       The number of pages to modify.
@param aPages       Pointer to the entry for the first page in a memory object's #RPageArray.
                    Each entry contains the physical address of a page together with its
                    current state (RPageArray::TState).

@return False, if the page table no longer maps any entries and may be freed.
        True otherwise, to indicate that the page table is still needed.

@pre #MmuLock held.
@post #MmuLock held and has not been released by this function.
*/
TBool Mmu::UnmapPages(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aCount);

    TUint count = 0;
    if(aCount==1)
        {
        if(*aPtePtr==KPteUnallocatedEntry)
            return true; // page already unmapped

        if(!RPageArray::TargetStateIsDecommitted(*aPages))
            return true; // page has been reallocated

        // unmap page...
        ++count;
        TPte pte = KPteUnallocatedEntry;
        TRACE2(("!PTE %x=%x",aPtePtr,pte));
        *aPtePtr = pte;

        // clean cache...
        CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
        }
    else
        {
        // check we are only updating a single page table...
        __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);

        // unmap pages...
        TPte* pPte = aPtePtr;
        TPte* pPteEnd = aPtePtr+aCount;
        do
            {
            if(RPageArray::TargetStateIsDecommitted(*aPages++) && *pPte!=KPteUnallocatedEntry)
                {
                // unmap page...
                ++count;
                TPte pte = KPteUnallocatedEntry;
                TRACE2(("!PTE %x=%x",pPte,pte));
                *pPte = pte;
                }
            }
        while(++pPte<pPteEnd);

        if(!count)
            return true; // no PTEs changed, so nothing more to do

        // clean cache...
        CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
        }

    // update page table info...
    SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
    count = pti->DecPageCount(count);
    TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,count));
    __NK_ASSERT_DEBUG(pti->CheckPageCount());

    // see if page table needs freeing...
    TUint keepPt = count | pti->PermanenceCount();

    return keepPt;
    }
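
/*
Illustrative sketch (not part of the original source): unlike the overload
above, this variant consults the page array and only clears a PTE whose page
array entry is in a decommitted state, which makes it suitable when a
decommit races with other operations on the same pages. 'ptePtr', 'n' and
'pageArrayEntries' are hypothetical:

    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    TBool keepPt = TheMmu.UnmapPages(ptePtr, n, pageArrayEntries);
*/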


/**
Modify page table entries (PTEs) so the given memory pages are not accessible.
Entries are only updated if the current state of the corresponding page
is RPageArray::ERestrictingNA.

@param aPtePtr      Pointer into a page table for the PTE of the first page.
@param aCount       The number of pages to modify.
@param aPages       Pointer to the entry for the first page in a memory object's #RPageArray.
                    Each entry contains the physical address of a page together with its
                    current state (RPageArray::TState).

@pre #MmuLock held.
@post #MmuLock held and has not been released by this function.
*/
void Mmu::RestrictPagesNA(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aCount);

    if(aCount==1)
        {
        TPhysAddr page = *aPages;
        TPte pte = *aPtePtr;
        RPageArray::TState state = RPageArray::State(page);
        if(state != RPageArray::ERestrictingNA && state != RPageArray::EMoving)
            return; // page no longer needs restricting

        if(pte==KPteUnallocatedEntry)
            return; // page gone

        // restrict page...
        pte = Mmu::MakePteInaccessible(pte,false);
        TRACE2(("!PTE %x=%x",aPtePtr,pte));
        *aPtePtr = pte;

        // clean cache...
        CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
        }
    else
        {
        // check we are only updating a single page table...
        __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);

        // restrict pages...
        TPte* pPte = aPtePtr;
        TPte* pPteEnd = aPtePtr+aCount;
        do
            {
            TPhysAddr page = *aPages++;
            TPte pte = *pPte++;
            if(RPageArray::State(page)==RPageArray::ERestrictingNA && pte!=KPteUnallocatedEntry)
                {
                pte = Mmu::MakePteInaccessible(pte,false);
                TRACE2(("!PTE %x=%x",pPte-1,pte));
                pPte[-1] = pte;
                }
            }
        while(pPte<pPteEnd);

        // clean cache...
        CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
        }
    }
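
/*
Illustrative sketch (not part of the original source): RestrictPagesNA() is
the PTE-level step of restricting access; pages whose array entries are in
the ERestrictingNA state (or, for a single page, EMoving) have their PTEs
made inaccessible so the next access takes a fault. Hypothetical call:

    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    TheMmu.RestrictPagesNA(ptePtr, n, pageArrayEntries);
*/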


/**
Modify page table entries (PTEs) so they map the given demand paged memory pages.

Entries are only updated if the current state of the corresponding page
is RPageArray::ECommitted.

This function is used for demand paged memory when handling a page fault or
memory pinning operation. It will widen the access permission of existing entries
if required to match \a aBlankPte and will 'rejuvenate' the page table.

@param aPtePtr      Pointer into a page table for the PTE of the first page.
@param aCount       The number of pages to modify.
@param aPages       Pointer to the entry for the first page in a memory object's #RPageArray.
                    Each entry contains the physical address of a page together with its
                    current state (RPageArray::TState).
@param aBlankPte    The value to use for each PTE, with the physical address component equal
                    to zero.

@return False, if the page table no longer maps any entries and may be freed.
        True otherwise, to indicate that the page table is still needed.

@pre #MmuLock held.
@post #MmuLock held (but may have been released by this function).
*/
TBool Mmu::PageInPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aCount);
    __NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);

    TUint count = 0;

    if(aCount==1)
        {
        // get page to map...
        TPhysAddr page = *aPages;
        TPte pte = *aPtePtr;
        if(!RPageArray::TargetStateIsCommitted(page))
            goto done; // page no longer needs mapping

#ifdef _DEBUG
        if(pte!=KPteUnallocatedEntry)
            {
            if ((pte^page)>=TPte(KPageSize) && !Mmu::IsPteInaccessible(pte) &&
                !Mmu::IsPteReadOnly(pte))
                {
                // Page has been mapped before but the physical address is different
                // and the page hasn't been moved as it is not inaccessible.
                Kern::Printf("Mmu::PageInPages already mapped %x->%x",page,pte);
                __NK_ASSERT_DEBUG(0);
                }
            }
#endif
        if(!Mmu::IsPteMoreAccessible(aBlankPte,pte))
            return true; // return true to keep the page table (it already had at least this page mapped)

        // remap page with new increased permissions...
        if(pte==KPteUnallocatedEntry)
            count = 1; // we'll be adding a new pte entry, count it
        if(!Mmu::IsPteReadOnly(aBlankPte))
            ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
        pte = (page&~KPageMask)|aBlankPte;
        TRACE2(("!PTE %x=%x",aPtePtr,pte));
        *aPtePtr = pte;

        // clean cache...
        CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
        }
    else
        {
        // check we are only updating a single page table...
        __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);

        // map pages...
        TPte* pPte = aPtePtr;
        TPte* pPteEnd = aPtePtr+aCount;
        do
            {
            // map page...
            TPhysAddr page = *aPages++;
            TPte pte = *pPte++;
            if(RPageArray::TargetStateIsCommitted(page))
                {
#ifdef _DEBUG
                if(pte!=KPteUnallocatedEntry)
                    {
                    if ((pte^page)>=TPte(KPageSize) && !Mmu::IsPteInaccessible(pte) &&
                        !Mmu::IsPteReadOnly(pte))
                        {
                        // Page has been mapped before but the physical address is different
                        // and the page hasn't been moved as it is not inaccessible.
                        Kern::Printf("Mmu::PageInPages already mapped %x->%x",page,pte);
                        __NK_ASSERT_DEBUG(0);
                        }
                    }
#endif
                if(Mmu::IsPteMoreAccessible(aBlankPte,pte))
                    {
                    // remap page with new increased permissions...
                    if(pte==KPteUnallocatedEntry)
                        ++count; // we'll be adding a new pte entry, count it
                    if(!Mmu::IsPteReadOnly(aBlankPte))
                        ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
                    pte = (page&~KPageMask)|aBlankPte;
                    TRACE2(("!PTE %x=%x",pPte-1,pte));
                    pPte[-1] = pte;
                    }
                }
            }
        while(pPte!=pPteEnd);

        // clean cache...
        CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
        }

done:
    // update page counts...
    SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
    count = pti->IncPageCount(count);
    TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,pti->PageCount()));
    __NK_ASSERT_DEBUG(pti->CheckPageCount());

    // see if page table needs freeing...
    TUint keepPt = count | pti->PermanenceCount();

    // rejuvenate demand paged page tables...
    ThePager.RejuvenatePageTable(aPtePtr);

    return keepPt;
    }
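
/*
Illustrative sketch (not part of the original source): a page-fault or
pinning path would call PageInPages() with a blank PTE carrying the
permissions it needs; existing entries are only rewritten when the new PTE is
more accessible, and writable page-ins mark the page dirty via the pager.
'ptePtr', 'n', 'pageArrayEntries' and 'blankPte' are hypothetical:

    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    TBool keepPt = TheMmu.PageInPages(ptePtr, n, pageArrayEntries, blankPte);
    // note: per the @post above, the MmuLock may have been released and re-acquired
*/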


//
// CodeModifier
//

#ifdef __DEBUGGER_SUPPORT__

void DoWriteCode(TUint32* aAddress, TUint32 aValue);

#ifdef __SMP__

extern "C" void __e32_instruction_barrier();

class TCodeModifierBroadcast : public TGenericIPI
    {
public:
    TCodeModifierBroadcast(TUint32* aAddress, TUint32 aValue);
    static void Isr(TGenericIPI*);
    void Go();
public:
    TUint32* iAddress;
    TUint32 iValue;
    volatile TInt iFlag;
    };

TCodeModifierBroadcast::TCodeModifierBroadcast(TUint32* aAddress, TUint32 aValue)
    : iAddress(aAddress), iValue(aValue), iFlag(0)
    {
    }

void TCodeModifierBroadcast::Isr(TGenericIPI* aPtr)
    {
    TCodeModifierBroadcast& a = *(TCodeModifierBroadcast*)aPtr;
    while (!__e32_atomic_load_acq32(&a.iFlag))
        __chill();
#ifdef __BROADCAST_CACHE_MAINTENANCE__
    CacheMaintenance::CodeChanged((TLinAddr)a.iAddress, sizeof (TInt), CacheMaintenance::ECodeModifier); // need to do separate Clean-D, Purge-I on each core
#else
    __e32_instruction_barrier(); // synchronize instruction execution
#endif
    }

void TCodeModifierBroadcast::Go()
    {
    NKern::Lock();
    QueueAllOther(&Isr);
    WaitEntry();                    // wait for other cores to stop
    DoWriteCode(iAddress, iValue);
    iFlag = 1;
    __e32_instruction_barrier();    // synchronize instruction execution
    WaitCompletion();               // wait for other cores to resume
    NKern::Unlock();
    }
#endif

/**
@pre Calling thread must be in critical section
@pre CodeSeg mutex held
*/
TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
    {
    __ASSERT_CRITICAL;
    Mmu& m=TheMmu;
    RamAllocLock::Lock();
    MmuLock::Lock();
    __UNLOCK_GUARD_START(MmuLock);

    // Check aProcess is still alive by opening a reference on its os asid.
    TInt osAsid = ((DMemModelProcess*)aProcess)->TryOpenOsAsid();
    if (osAsid < 0)
        {
        __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - zombie process"));
        __UNLOCK_GUARD_END(MmuLock);
        MmuLock::Unlock();
        RamAllocLock::Unlock();
        return KErrBadDescriptor;
        }

    // Find the physical address of the page the breakpoint belongs to.
    TPhysAddr physAddr = Mmu::LinearToPhysical(aAddress, osAsid);
    __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - PA:%x", physAddr));


    if (physAddr==KPhysAddrInvalid)
        {
        __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - invalid VA"));
        __UNLOCK_GUARD_END(MmuLock);
        MmuLock::Unlock();
        RamAllocLock::Unlock();
        // The os asid is no longer required.
        ((DMemModelProcess*)aProcess)->CloseOsAsid();
        return KErrBadDescriptor;
        }

    // Temporarily map the physical page.
    TLinAddr tempAddr = m.MapTemp(physAddr&~KPageMask, aAddress>>KPageShift);
    tempAddr |= aAddress & KPageMask;
    __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - tempAddr:%x",tempAddr));

    TInt r = KErrBadDescriptor;
    TUint32* ptr = (TUint32*)(tempAddr&~3);
    TUint32 oldWord;

    if(Kern::SafeRead(ptr,&oldWord,sizeof(oldWord))==0 // safely read the original value...
        && Kern::SafeWrite(ptr,&oldWord,sizeof(oldWord))==0 ) // and write it back
        {
        // We have successfully probed the memory by reading and writing to it,
        // so we assume it is now safe to access without generating exceptions.
        // If this is wrong it will kill the system horribly.

        TUint32 newWord;
        TUint badAlign;
        TUint shift = (aAddress&3)*8;

        switch(aSize)
            {
            case 1: // 1 byte value
                badAlign = 0;
                *(TUint8*)aOldValue = oldWord>>shift;
                newWord = (oldWord&~(0xff<<shift)) | ((aValue&0xff)<<shift);
                break;

            case 2: // 2 byte value
                badAlign = tempAddr&1;
                if(!badAlign)
                    *(TUint16*)aOldValue = oldWord>>shift;
                newWord = (oldWord&~(0xffff<<shift)) | ((aValue&0xffff)<<shift);
                break;

            default: // 4 byte value
                badAlign = tempAddr&3;
                if(!badAlign)
                    *(TUint32*)aOldValue = oldWord;
                newWord = aValue;
                break;
            }

        if(!badAlign)
            {
            // write the new value...
#ifdef __SMP__
            TCodeModifierBroadcast b(ptr, newWord);
            b.Go();
#else
            DoWriteCode(ptr, newWord);
#endif
            r = KErrNone;
            }
        }

    __UNLOCK_GUARD_END(MmuLock);
    m.UnmapTemp();
    MmuLock::Unlock();
    RamAllocLock::Unlock();
    // The os asid is no longer required.
    ((DMemModelProcess*)aProcess)->CloseOsAsid();
    return r;
    }
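
/*
Illustrative sketch (not part of the original source): a debug agent running
in a critical section with the code segment mutex held could use
SafeWriteCode() to plant a breakpoint and keep the original instruction word
for later restoration. 'process', 'breakAddr' and 'KBreakpointInstruction'
are hypothetical:

    TUint32 originalInstruction;
    TInt r = CodeModifier::SafeWriteCode(process, breakAddr, sizeof(TUint32),
                                         KBreakpointInstruction, &originalInstruction);
    if(r==KErrNone)
        {
        // breakpoint written; writing 'originalInstruction' back later removes it
        }
*/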

/**
@pre Calling thread must be in critical section
@pre CodeSeg mutex held
*/
void DoWriteCode(TUint32* aAddress, TUint32 aValue)
    {
    // We do not want to be interrupted by e.g. an ISR that would run the altered code before the IMB-Range.
    // Therefore, copy the data and clean/invalidate the caches with interrupts disabled.
    TInt irq = NKern::DisableAllInterrupts();
    *aAddress = aValue;
    CacheMaintenance::CodeChanged((TLinAddr)aAddress, sizeof(TUint32), CacheMaintenance::ECodeModifier);
    NKern::RestoreInterrupts(irq);
    }

#endif //__DEBUGGER_SUPPORT__



//
// Virtual pinning
//

TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
    {
    aPinObject = (TVirtualPinObject*)new DVirtualPinMapping;
    return aPinObject != NULL ? KErrNone : KErrNoMemory;
    }

TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr aStart, TUint aSize, DThread* aThread)
    {
    NKern::ThreadEnterCS();
    TUint offsetInMapping;
    TUint mapInstanceCount;
    DMemoryMapping* mapping = MM::FindMappingInThread((DMemModelThread*)aThread,
                                                      aStart,
                                                      aSize,
                                                      offsetInMapping,
                                                      mapInstanceCount);
    TInt r = KErrBadDescriptor;
    if (mapping)
        {
        TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
        if(mapping->IsPinned())
            {
            // The mapping for the specified virtual address is already pinned so we don't
            // need to do anything. Also, we can't safely pin the memory in this case
            // anyway, as pinned mappings may move between memory objects.
            r = KErrNone;
            }
        else
            {
            MmuLock::Lock();
            DMemoryObject* memory = mapping->Memory();
            if (mapInstanceCount != mapping->MapInstanceCount() ||
                !memory || !memory->IsDemandPaged())
                {
                // mapping has been reused, no memory, or it's not paged, so no need to pin...
                MmuLock::Unlock();
                r = KErrNone;
                }
            else
                {
                // paged memory needs pinning...
                // Open a reference on the memory so it doesn't get deleted.
                memory->Open();
                MmuLock::Unlock();

                TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
                r = ((DVirtualPinMapping*)aPinObject)->Pin(memory, startInMemory, count, mapping->Permissions(),
                                                           mapping, mapInstanceCount);
                memory->Close();
                }
            }
        mapping->Close();
        }
    NKern::ThreadLeaveCS();

    return r;
    }

TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr aStart, TUint aSize)
    {
    aPinObject = 0;
    NKern::ThreadEnterCS();
    TUint offsetInMapping;
    TUint mapInstanceCount;
    DMemoryMapping* mapping = MM::FindMappingInThread((DMemModelThread*)&Kern::CurrentThread(),
                                                      aStart,
                                                      aSize,
                                                      offsetInMapping,
                                                      mapInstanceCount);
    TInt r = KErrBadDescriptor;
    if (mapping)
        {
        TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
        if(mapping->IsPinned())
            {
            // The mapping for the specified virtual address is already pinned so we don't
            // need to do anything. Also, we can't safely pin the memory in this case
            // anyway, as pinned mappings may move between memory objects.
            r = KErrNone;
            }
        else
            {
            MmuLock::Lock();
            DMemoryObject* memory = mapping->Memory();
            if (mapInstanceCount != mapping->MapInstanceCount() ||
                !memory || !memory->IsDemandPaged())
                {
                // mapping has been reused, no memory, or it's not paged, so no need to pin...
                MmuLock::Unlock();
                r = KErrNone;
                }
            else
                {// The memory is demand paged so create a pin object and pin it.
                // Open a reference on the memory so it doesn't get deleted.
                memory->Open();
                MmuLock::Unlock();
                r = CreateVirtualPinObject(aPinObject);
                if (r == KErrNone)
                    {
                    TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
                    r = ((DVirtualPinMapping*)aPinObject)->Pin(memory, startInMemory, count, mapping->Permissions(),
                                                               mapping, mapInstanceCount);
                    if (r != KErrNone)
                        {// Failed to pin the memory so the pin object is not required.
                        DestroyVirtualPinObject(aPinObject);
                        }
                    }
                memory->Close();
                }
            }
        mapping->Close();
        }
    NKern::ThreadLeaveCS();

    return r;
    }

void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
    {
    DVirtualPinMapping* mapping = (DVirtualPinMapping*)aPinObject;
    if (mapping->IsAttached())
        mapping->Unpin();
    }

void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
    {
    DVirtualPinMapping* mapping = (DVirtualPinMapping*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
    if (mapping)
        {
        if (mapping->IsAttached())
            mapping->Unpin();
        mapping->AsyncClose();
        }
    }
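
/*
Illustrative sketch (not part of the original source): the virtual pin hooks
above form a create/pin/unpin/destroy lifecycle, normally reached through the
kernel's public pinning API rather than called directly. A driver that must
keep a client buffer paged in while accessing it from a context that cannot
take page faults might do the equivalent of the following; 'buf' and 'len'
are hypothetical:

    TVirtualPinObject* pin = NULL;
    TInt r = M::CreateVirtualPinObject(pin);
    if(r==KErrNone)
        {
        r = M::PinVirtualMemory(pin, buf, len, &Kern::CurrentThread());
        if(r==KErrNone)
            {
            // ... access the buffer without risking a paging fault ...
            M::UnpinVirtualMemory(pin);
            }
        M::DestroyVirtualPinObject(pin);
        }
*/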

TInt M::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject)
    {
    aPinObject = (TPhysicalPinObject*)new DPhysicalPinMapping;
    return aPinObject != NULL ? KErrNone : KErrNoMemory;
    }

//
// Physical pinning
//

TInt M::PinPhysicalMemory(TPhysicalPinObject* aPinObject, TLinAddr aStart, TUint aSize, TBool aReadOnly,
                          TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour, DThread* aThread)
    {
    NKern::ThreadEnterCS();
    TUint offsetInMapping;
    TUint mapInstanceCount;
    DMemoryMapping* mapping = MM::FindMappingInThread((DMemModelThread*)aThread,
                                                      aStart,
                                                      aSize,
                                                      offsetInMapping,
                                                      mapInstanceCount);
    TInt r = KErrBadDescriptor;
    if (mapping)
        {
        TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;

        MmuLock::Lock();
        DMemoryObject* memory = mapping->Memory();
        if (mapInstanceCount == mapping->MapInstanceCount() && memory)
            {
            memory->Open();
            MmuLock::Unlock();

            TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
            TMappingPermissions permissions = aReadOnly ? ESupervisorReadOnly : ESupervisorReadWrite;
            r = ((DPhysicalPinMapping*)aPinObject)->Pin(memory, startInMemory, count, permissions);
            if (r == KErrNone)
                {
                r = ((DPhysicalPinMapping*)aPinObject)->PhysAddr(0, count, aAddress, aPages);
                if (r>=KErrNone)
                    {
                    r = KErrNone; // do not report discontiguous memory in the return value
                    const TMappingAttributes2& mapAttr2 =
                        MM::LegacyMappingAttributes(memory->Attributes(), mapping->Permissions());
                    *(TMappingAttributes2*)&aMapAttr = mapAttr2;
                    }
                else
                    UnpinPhysicalMemory(aPinObject);
                }
            memory->Close();
            }
        else // mapping has been reused or no memory...
            {
            MmuLock::Unlock();
            }
        mapping->Close();
        }
    NKern::ThreadLeaveCS();
    aColour = (aStart >> KPageShift) & KPageColourMask;
    return r;
    }

void M::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject)
    {
    DPhysicalPinMapping* mapping = (DPhysicalPinMapping*)aPinObject;
    if (mapping->IsAttached())
        mapping->Unpin();
    }

void M::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject)
    {
    DPhysicalPinMapping* mapping = (DPhysicalPinMapping*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
    if (mapping)
        {
        if (mapping->IsAttached())
            mapping->Unpin();
        mapping->AsyncClose();
        }
    }
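
/*
Illustrative sketch (not part of the original source): physical pinning is
the usual preparation for DMA; it returns the physical address (or the
per-page addresses for discontiguous memory) together with the mapping
attributes and page colour needed for later cache synchronisation. 'buf',
'len' and 'pagePhysList' are hypothetical:

    TPhysicalPinObject* pin = NULL;
    TPhysAddr physAddr;
    TUint32 mapAttr;
    TUint colour;
    TInt r = M::CreatePhysicalPinObject(pin);
    if(r==KErrNone)
        {
        r = M::PinPhysicalMemory(pin, buf, len, EFalse, physAddr, pagePhysList,
                                 mapAttr, colour, &Kern::CurrentThread());
        if(r==KErrNone)
            {
            // ... program the DMA controller with the pinned physical pages ...
            M::UnpinPhysicalMemory(pin);
            }
        M::DestroyPhysicalPinObject(pin);
        }
*/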



//
// Cache sync operations
//

//@pre As for MASK_THREAD_STANDARD
void Mmu::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
    {
    // Jump over the pages we do not have to sync.
    aPages += aOffset>>KPageShift;
    aOffset &= KPageMask;
    aColour = (aColour + (aOffset>>KPageShift)) & KPageColourMask;

    // Calculate the page table entry for the temporary mapping.
    TUint pteType = PteType(ESupervisorReadWrite,true);
    TMappingAttributes2 mapAttr2(aMapAttr);
    TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);

    while (aSize) // A single pass of the loop operates within page boundaries.
        {
        TUint sizeInLoopPass = Min(KPageSize, aOffset+aSize) - aOffset; // The size of the region in this pass.

        NKern::ThreadEnterCS();
        Kern::MutexWait(*iPhysMemSyncMutex);

        TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
        CacheMaintenance::MakeCPUChangesVisible(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
        iPhysMemSyncTemp.Unmap();

        Kern::MutexSignal(*iPhysMemSyncMutex);
        NKern::ThreadLeaveCS();

        aSize -= sizeInLoopPass;                 // Remaining bytes to sync.
        aOffset = 0;                             // In all the pages after the first, sync always starts at zero offset.
        aPages++;                                // Point to the next page.
        aColour = (aColour+1) & KPageColourMask;
        }
    }
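
/*
Worked example (not part of the original source), assuming 4KB pages: for
aOffset=0x1800 and aSize=0x2000 the function first advances aPages by one
page (0x1800>>KPageShift) and reduces aOffset to 0x800. The first loop pass
then syncs Min(0x1000, 0x800+0x2000) - 0x800 = 0x800 bytes, the second a full
0x1000, and the final pass the remaining 0x800, with aColour advancing by one
after each pass.
*/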

//@pre As for MASK_THREAD_STANDARD
void Mmu::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
    {
    // Jump over the pages we do not have to sync.
    aPages += aOffset>>KPageShift;
    aOffset &= KPageMask;
    aColour = (aColour + (aOffset>>KPageShift)) & KPageColourMask;

    // Calculate the page table entry for the temporary mapping.
    TUint pteType = PteType(ESupervisorReadWrite,true);
    TMappingAttributes2 mapAttr2(aMapAttr);
    TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);

    while (aSize) // A single pass of the loop operates within page boundaries.
        {
        TUint sizeInLoopPass = Min(KPageSize, aOffset+aSize) - aOffset; // The size of the region in this pass.

        NKern::ThreadEnterCS();
        Kern::MutexWait(*iPhysMemSyncMutex);

        TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
        CacheMaintenance::PrepareMemoryForExternalWrites(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
        iPhysMemSyncTemp.Unmap();

        Kern::MutexSignal(*iPhysMemSyncMutex);
        NKern::ThreadLeaveCS();

        aSize -= sizeInLoopPass;                 // Remaining bytes to sync.
        aOffset = 0;                             // In all the pages after the first, sync always starts at zero offset.
        aPages++;                                // Point to the next page.
        aColour = (aColour+1) & KPageColourMask;
        }
    }

//@pre As for MASK_THREAD_STANDARD
void Mmu::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
    {
    // Jump over the pages we do not have to sync.
    aPages += aOffset>>KPageShift;
    aOffset &= KPageMask;
    aColour = (aColour + (aOffset>>KPageShift)) & KPageColourMask;

    // Calculate the page table entry for the temporary mapping.
    TUint pteType = PteType(ESupervisorReadWrite,true);
    TMappingAttributes2 mapAttr2(aMapAttr);
    TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);

    while (aSize) // A single pass of the loop operates within page boundaries.
        {
        TUint sizeInLoopPass = Min(KPageSize, aOffset+aSize) - aOffset; // The size of the region in this pass.

        NKern::ThreadEnterCS();
        Kern::MutexWait(*iPhysMemSyncMutex);

        TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
        CacheMaintenance::MakeExternalChangesVisible(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
        iPhysMemSyncTemp.Unmap();

        Kern::MutexSignal(*iPhysMemSyncMutex);
        NKern::ThreadLeaveCS();

        aSize -= sizeInLoopPass;                 // Remaining bytes to sync.
        aOffset = 0;                             // In all the pages after the first, sync always starts at zero offset.
        aPages++;                                // Point to the next page.
        aColour = (aColour+1) & KPageColourMask;
        }
    }

EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
    {
    CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaWrite");
    TheMmu.SyncPhysicalMemoryBeforeDmaWrite(aPages, aColour, aOffset, aSize, aMapAttr);
    return KErrNone;
    }

EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
    {
    CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaRead");
    TheMmu.SyncPhysicalMemoryBeforeDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
    return KErrNone;
    }

EXPORT_C TInt Cache::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
    {
    CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryAfterDmaRead");
    TheMmu.SyncPhysicalMemoryAfterDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
    return KErrNone;
    }
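
/*
Illustrative sketch (not part of the original source): the exported Cache::
wrappers are intended to bracket a DMA transfer on the pinned physical pages.
A driver would typically make CPU-side changes visible before an outgoing
transfer, prepare the buffer before an incoming transfer, and make the
device's changes visible afterwards. 'pagePhysList', 'colour', 'offset',
'len' and 'mapAttr' are hypothetical values obtained from physical pinning:

    // device is about to read from memory (memory -> device)
    Cache::SyncPhysicalMemoryBeforeDmaWrite(pagePhysList, colour, offset, len, mapAttr);
    // ... start and wait for the outgoing transfer ...

    // device is about to write to memory (device -> memory)
    Cache::SyncPhysicalMemoryBeforeDmaRead(pagePhysList, colour, offset, len, mapAttr);
    // ... start and wait for the incoming transfer ...
    Cache::SyncPhysicalMemoryAfterDmaRead(pagePhysList, colour, offset, len, mapAttr);
*/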