|
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\x86\xmmu.cpp
//
//
|
#include <x86_mem.h>
#include <mmubase.inl>
#include <ramcache.h>
#include "execs.h"
#include <defrag.h>

extern "C" void DoTotalInvalidateTLB();
|
// Constants for X86 MMU
const TUint32 KPdePtePresent=0x01;
const TUint32 KPdePteWrite=0x02;
const TUint32 KPdePteUser=0x04;
const TUint32 KPdePteWriteThrough=0x08;
const TUint32 KPdePteUncached=0x10;
const TUint32 KPdePteAccessed=0x20;
const TUint32 KPdePteDirty=0x40;
const TUint32 KPdeLargePage=0x80;                       // Pentium and above, not 486
const TUint32 KPdePteGlobal=0x100;                      // P6 and above, not 486 or Pentium
const TUint32 KPdePtePhysAddrMask=0xfffff000u;
const TUint32 KPdeLargePagePhysAddrMask=0xffc00000u;    // Pentium and above, not 486
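
// Worked example: a PTE (or small-page PDE) is just the 4K-aligned physical
// address OR'd with the flag bits above, so a global, writable,
// supervisor-only mapping of physical page 0x00123000 is
//     0x00123000 | KPdePtePresent | KPdePteWrite | KPdePteGlobal == 0x00123103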
|
const TPde KPdPdePerm=KPdePtePresent|KPdePteWrite;
const TPte KPdPtePerm=KPdePtePresent|KPdePteWrite;
const TPde KPtPdePerm=KPdePtePresent|KPdePteWrite;
const TPte KPtPtePerm=KPdePtePresent|KPdePteWrite;
const TPde KPtInfoPdePerm=KPdePtePresent|KPdePteWrite;
const TPte KPtInfoPtePerm=KPdePtePresent|KPdePteWrite;
const TPde KRomPdePerm=KPdePtePresent|KPdePteWrite|KPdePteUser;
const TPte KRomPtePerm=KPdePtePresent|KPdePteUser;
const TPde KShadowPdePerm=KPdePtePresent|KPdePteWrite|KPdePteUser;
const TPte KShadowPtePerm=KPdePtePresent|KPdePteWrite|KPdePteUser;  // unfortunately there's no RWRO
|
// Permissions for each chunk type

const TPte KStandardPtePerm=KPdePtePresent|KPdePteWrite|KPdePteUser;
const TPte KPdePermNONO=KPdePtePresent|KPdePteWrite|KPdePteUser;
const TPte KPdePermRONO=KPdePtePresent;
const TPte KPdePermRORO=KPdePtePresent|KPdePteUser;
const TPte KPdePermRWNO=KPdePtePresent|KPdePteWrite;
const TPte KPdePermRWRW=KPdePtePresent|KPdePteWrite|KPdePteUser;
|
LOCAL_D const TPte ChunkPtePermissions[ENumChunkTypes] =
    {
    KStandardPtePerm|KPdePteGlobal,     // EKernelData
    KStandardPtePerm|KPdePteGlobal,     // EKernelStack
    KPdePermRWNO|KPdePteGlobal,         // EKernelCode - loading
    KPdePermRWNO,                       // EDll (used for global code) - loading
    KPdePermRORO,                       // EUserCode
    KStandardPtePerm,                   // ERamDrive
    KStandardPtePerm,                   // EUserData
    KStandardPtePerm,                   // EDllData
    KStandardPtePerm,                   // EUserSelfModCode
    KStandardPtePerm,                   // ESharedKernelSingle
    KStandardPtePerm,                   // ESharedKernelMultiple
    KStandardPtePerm,                   // ESharedIo
    KStandardPtePerm|KPdePteGlobal,     // ESharedKernelMirror
    KStandardPtePerm|KPdePteGlobal,     // EKernelMessage
    };

LOCAL_D const TPde ChunkPdePermissions[ENumChunkTypes] =
    {
    KPdePermRWNO,                       // EKernelData
    KPdePermRWNO,                       // EKernelStack
    KPdePermRWNO,                       // EKernelCode
    KPdePermRWRW,                       // EDll
    KPdePermRWRW,                       // EUserCode
    KPdePermRWRW,                       // ERamDrive
    KPdePermRWRW,                       // EUserData
    KPdePermRWRW,                       // EDllData
    KPdePermRWRW,                       // EUserSelfModCode
    KPdePermRWRW,                       // ESharedKernelSingle
    KPdePermRWRW,                       // ESharedKernelMultiple
    KPdePermRWRW,                       // ESharedIo
    KPdePermRWNO,                       // ESharedKernelMirror
    KPdePermRWNO,                       // EKernelMessage
    };
|
#if defined(KMMU)
extern "C" void __DebugMsgFlushTLB()
    {
    __KTRACE_OPT(KMMU,Kern::Printf("FlushTLB"));
    }

extern "C" void __DebugMsgLocalFlushTLB()
    {
    __KTRACE_OPT(KMMU,Kern::Printf("LocalFlushTLB"));
    }

extern "C" void __DebugMsgTotalFlushTLB()
    {
    __KTRACE_OPT(KMMU,Kern::Printf("TotalFlushTLB"));
    }

extern "C" void __DebugMsgINVLPG(int a)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("INVLPG(%08x)",a));
    }
#endif
|
// Inline functions for simple transformations
inline TLinAddr PageTableLinAddr(TInt aId)
    {
    return (KPageTableBase+(aId<<KPageTableShift));
    }

inline TPte* PageTable(TInt aId)
    {
    return (TPte*)(KPageTableBase+(aId<<KPageTableShift));
    }

inline TLinAddr PageDirectoryLinAddr(TInt aOsAsid)
    {
    return (KPageDirectoryBase+(aOsAsid<<KPageTableShift));
    }
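
// Illustrative note: page tables and page directories both occupy one 4K page
// in this model (the code below repeatedly assumes page table size = page
// size), so these helpers are plain array arithmetic - e.g. page table 5
// lives at KPageTableBase+5*4K and PageTable(5)[0x30] is the PTE for the
// 0x30'th page of the chunk it maps.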
|
extern "C" {

void __fastcall DoInvalidateTLBForPage(TLinAddr /*aLinAddr*/);
void DoInvalidateTLB();
void DoLocalInvalidateTLB();

}
|
#ifdef __SMP__

TSpinLock ShadowSpinLock(TSpinLock::EOrderGenericPreHigh0);     // Used when stopping other CPUs

class TTLBIPI : public TGenericIPI
    {
public:
    TTLBIPI();

    static void InvalidateForPagesIsr(TGenericIPI*);
    static void LocalInvalidateIsr(TGenericIPI*);
    static void TotalInvalidateIsr(TGenericIPI*);
    static void InvalidateIsr(TGenericIPI*);
    static void WaitAndInvalidateIsr(TGenericIPI*);
    void AddAddress(TLinAddr aAddr);
    void InvalidateList();
public:
    volatile TInt iFlag;
    TInt iCount;
    TLinAddr iAddr[KMaxPages];
    };

TTLBIPI::TTLBIPI()
    : iFlag(0), iCount(0)
    {
    }

void TTLBIPI::LocalInvalidateIsr(TGenericIPI*)
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("TLBLocInv"));
    DoLocalInvalidateTLB();
    }

void TTLBIPI::TotalInvalidateIsr(TGenericIPI*)
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("TLBTotInv"));
    DoTotalInvalidateTLB();
    }

void TTLBIPI::InvalidateIsr(TGenericIPI*)
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("TLBInv"));
    DoInvalidateTLB();
    }

void TTLBIPI::WaitAndInvalidateIsr(TGenericIPI* aTLBIPI)
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("TLBWtInv"));
    TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
    while (!a.iFlag)
        {}
    if (a.iCount == 1)
        DoInvalidateTLBForPage(a.iAddr[0]);
    else
        DoInvalidateTLB();
    }

void TTLBIPI::InvalidateForPagesIsr(TGenericIPI* aTLBIPI)
    {
    TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
    TInt i;
    for (i=0; i<a.iCount; ++i)
        {
        __KTRACE_OPT(KMMU2,Kern::Printf("TLBInv %08x", a.iAddr[i]));
        DoInvalidateTLBForPage(a.iAddr[i]);
        }
    }

void TTLBIPI::AddAddress(TLinAddr aAddr)
    {
    iAddr[iCount] = aAddr;
    if (++iCount == KMaxPages)
        InvalidateList();
    }

void TTLBIPI::InvalidateList()
    {
    NKern::Lock();
    InvalidateForPagesIsr(this);
    QueueAllOther(&InvalidateForPagesIsr);
    NKern::Unlock();
    WaitCompletion();
    iCount = 0;
    }

void LocalInvalidateTLB()
    {
    TTLBIPI ipi;
    NKern::Lock();
    DoLocalInvalidateTLB();
    ipi.QueueAllOther(&TTLBIPI::LocalInvalidateIsr);
    NKern::Unlock();
    ipi.WaitCompletion();
    }

void TotalInvalidateTLB()
    {
    TTLBIPI ipi;
    NKern::Lock();
    DoTotalInvalidateTLB();
    ipi.QueueAllOther(&TTLBIPI::TotalInvalidateIsr);
    NKern::Unlock();
    ipi.WaitCompletion();
    }

void InvalidateTLB()
    {
    TTLBIPI ipi;
    NKern::Lock();
    DoInvalidateTLB();
    ipi.QueueAllOther(&TTLBIPI::InvalidateIsr);
    NKern::Unlock();
    ipi.WaitCompletion();
    }

void InvalidateTLBForPage(TLinAddr aAddr)
    {
    TTLBIPI ipi;
    ipi.AddAddress(aAddr);
    ipi.InvalidateList();
    }
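
// Usage sketch (illustrative): after changing a mapping, the initiating CPU
// flushes its own TLB and then IPIs every other CPU so a stale translation
// cannot outlive the PTE update, e.g.
//     TTLBIPI ipi;
//     ipi.AddAddress(aAddr);      // batch up to KMaxPages addresses
//     ipi.InvalidateList();       // flush locally, IPI the rest, wait
// which is exactly what InvalidateTLBForPage() above does for one page.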
|
#else
#define InvalidateTLBForPage(a) DoInvalidateTLBForPage(a)
#define LocalInvalidateTLB()    DoLocalInvalidateTLB()
#define TotalInvalidateTLB()    DoTotalInvalidateTLB()
#define InvalidateTLB()         DoInvalidateTLB()
#endif
|
TPte* SafePageTableFromPde(TPde aPde)
    {
    if (aPde&KPdePtePresent)
        {
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde);
        if (pi)
            {
            TInt id=pi->Offset();   // assumes page table size = page size
            return PageTable(id);
            }
        }
    return 0;
    }

TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
    {
    TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
    TPte* pt = SafePageTableFromPde(pde);
    if(pt)
        pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
    return pt;
    }

TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
    {
    TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
    SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
    TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
    TPte* pt = PageTable(id);
    pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
    return pt;
    }
|
TInt X86Mmu::LinearToPhysical(TLinAddr aAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList, TInt aOsAsid)
    {
    TPhysAddr physStart = LinearToPhysical(aAddr,aOsAsid);

    TInt pageShift = iPageShift;
    TUint32 page = aAddr>>pageShift<<pageShift;
    TUint32 lastPage = (aAddr+aSize-1)>>pageShift<<pageShift;
    TUint32* pageList = aPhysicalPageList;
    TUint32 nextPhys = LinearToPhysical(page,aOsAsid);
    TUint32 pageSize = 1<<pageShift;
    while(page<=lastPage)
        {
        TPhysAddr phys = LinearToPhysical(page,aOsAsid);
        if(pageList)
            *pageList++ = phys;
        if(phys!=nextPhys)
            nextPhys = KPhysAddrInvalid;
        else
            nextPhys += pageSize;
        page += pageSize;
        }
    if(nextPhys==KPhysAddrInvalid)
        {
        // Memory is discontiguous...
        aPhysicalAddress = KPhysAddrInvalid;
        return 1;
        }
    else
        {
        // Memory is contiguous...
        aPhysicalAddress = physStart;
        return KErrNone;
        }
    }
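
// Behaviour sketch: for a 3-page region whose pages resolve to 0x100000,
// 0x101000, 0x102000 the run is contiguous, so aPhysicalAddress is set to the
// first physical address and KErrNone is returned; if any page breaks the run
// the function returns 1 and callers must use aPhysicalPageList instead.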
|
TPhysAddr X86Mmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
//
// Find the physical address corresponding to a given linear address in a specified OS
// address space. Call with system locked.
//
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("X86Mmu::LinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
    TInt pdeIndex=aLinAddr>>KChunkShift;
    TPde pde=PageDirectory(aOsAsid)[pdeIndex];
    TPhysAddr pa=KPhysAddrInvalid;
    if (pde & KPdePtePresent)
        {
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
        if (pi)
            {
            TInt id=pi->Offset();   // assumes page table size = page size
            TPte* pPte=PageTable(id);
            TPte pte=pPte[(aLinAddr&KChunkMask)>>KPageShift];
            if (pte & KPdePtePresent)
                {
                pa=(pte&KPdePtePhysAddrMask)+(aLinAddr&KPageMask);
                __KTRACE_OPT(KMMU2,Kern::Printf("Mapped with page table - returning %08x",pa));
                }
            }
        }
    return pa;
    }
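
// Walk example: on x86 with 4K pages, bits 31..22 of aLinAddr index the page
// directory, bits 21..12 index the page table and bits 11..0 are the byte
// offset, so 0x40123ABC uses PDE index 0x100, PTE index 0x123 and offset 0xABC.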
|
TInt X86Mmu::PreparePagesForDMA(TLinAddr /*aLinAddr*/, TInt /*aSize*/, TInt /*aOsAsid*/, TPhysAddr* /*aPhysicalPageList*/)
    {
    return KErrNotSupported;
    }

TInt X86Mmu::ReleasePagesFromDMA(TPhysAddr* /*aPhysicalPageList*/, TInt /*aPageCount*/)
    {
    return KErrNotSupported;
    }

static const TInt PermissionLookup[8]=
    {
    0,
    EMapAttrReadSup|EMapAttrExecSup,
    0,
    EMapAttrWriteSup|EMapAttrReadSup|EMapAttrExecSup,
    0,
    EMapAttrReadUser|EMapAttrExecUser,
    0,
    EMapAttrWriteUser|EMapAttrReadUser|EMapAttrExecUser
    };
|
TInt X86Mmu::PageTableId(TLinAddr aAddr, TInt aOsAsid)
    {
    TInt id=-1;
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::PageTableId(%08x,%d)",aAddr,aOsAsid));
    TInt pdeIndex=aAddr>>KChunkShift;
    TPde pde=PageDirectory(aOsAsid)[pdeIndex];
    if (pde & KPdePtePresent)
        {
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
        if (pi)
            id=pi->Offset();    // assumes page table size = page size
        }
    __KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
    return id;
    }
|
// Used only during boot for recovery of RAM drive
TInt X86Mmu::BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys)
    {
    TInt id=KErrNotFound;
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:BootPageTableId(%08x,&)",aAddr));
    TPde* kpd=(TPde*)KPageDirectoryBase;    // kernel page directory
    TInt pdeIndex=aAddr>>KChunkShift;
    TPde pde = kpd[pdeIndex];
    if (pde & KPdePtePresent)
        {
        aPtPhys = pde & KPdePtePhysAddrMask;
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
        if (pi)
            {
            SPageInfo::TType type = pi->Type();
            if (type == SPageInfo::EPageTable)
                id=pi->Offset();    // assumes page table size = page size
            else if (type == SPageInfo::EUnused)
                id = KErrUnknown;
            }
        }
    __KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
    return id;
    }
|
TBool X86Mmu::PteIsPresent(TPte aPte)
    {
    return aPte & KPdePtePresent;
    }

TPhysAddr X86Mmu::PtePhysAddr(TPte aPte, TInt /*aPteIndex*/)
    {
    return aPte & KPdePtePhysAddrMask;
    }
|
TPhysAddr X86Mmu::PdePhysAddr(TLinAddr aAddr)
    {
    TPde* kpd = (TPde*)KPageDirectoryBase;  // kernel page directory
    TPde pde = kpd[aAddr>>KChunkShift];
    if ((pde & (KPdePtePresent|KPdeLargePage)) == (KPdePtePresent|KPdeLargePage))
        return pde & KPdeLargePagePhysAddrMask;
    return KPhysAddrInvalid;
    }
|
void X86Mmu::Init1()
    {
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("X86Mmu::Init1"));

    TUint pge = TheSuperPage().iCpuId & EX86Feat_PGE;
    iPteGlobal = pge ? KPdePteGlobal : 0;
    X86_UseGlobalPTEs = pge!=0;

    // MmuBase data
    iPageSize=KPageSize;
    iPageMask=KPageMask;
    iPageShift=KPageShift;
    iChunkSize=KChunkSize;
    iChunkMask=KChunkMask;
    iChunkShift=KChunkShift;
    iPageTableSize=KPageTableSize;
    iPageTableMask=KPageTableMask;
    iPageTableShift=KPageTableShift;
    iPtClusterSize=KPtClusterSize;
    iPtClusterMask=KPtClusterMask;
    iPtClusterShift=KPtClusterShift;
    iPtBlockSize=KPtBlockSize;
    iPtBlockMask=KPtBlockMask;
    iPtBlockShift=KPtBlockShift;
    iPtGroupSize=KChunkSize/KPageTableSize;
    iPtGroupMask=iPtGroupSize-1;
    iPtGroupShift=iChunkShift-iPageTableShift;
    //TInt* iPtBlockCount;      // dynamically allocated - Init2
    //TInt* iPtGroupCount;      // dynamically allocated - Init2
    iPtInfo=(SPageTableInfo*)KPageTableInfoBase;
    iPageTableLinBase=KPageTableBase;
    //iRamPageAllocator;        // dynamically allocated - Init2
    //iAsyncFreeList;           // dynamically allocated - Init2
    //iPageTableAllocator;      // dynamically allocated - Init2
    //iPageTableLinearAllocator;// dynamically allocated - Init2
    iPtInfoPtePerm=KPtInfoPtePerm|iPteGlobal;
    iPtPtePerm=KPtPtePerm|iPteGlobal;
    iPtPdePerm=KPtPdePerm;
    iUserCodeLoadPtePerm=KPdePermRWNO;
    iKernelCodePtePerm=KPdePermRONO|iPteGlobal;
    iTempAddr=KTempAddr;
    iSecondTempAddr=KSecondTempAddr;

    TUint pse = TheSuperPage().iCpuId & EX86Feat_PSE;
    iMapSizes = pse ? KPageSize|KChunkSize : KPageSize;

    iDecommitThreshold=0;       // no cache consistency issues on decommit
    iRomLinearBase = ::RomHeaderAddress;
    iRomLinearEnd = KRomLinearEnd;
    iShadowPtePerm = KShadowPtePerm;
    iShadowPdePerm = KShadowPdePerm;

    // Mmu data
    TInt total_ram=TheSuperPage().iTotalRamSize;

    iNumOsAsids=1024;
    iNumGlobalPageDirs=1;
    //iOsAsidAllocator;         // dynamically allocated - Init2
    iGlobalPdSize=KPageTableSize;
    iGlobalPdShift=KPageTableShift;
    iLocalPdSize=0;
    iLocalPdShift=0;
    iAsidGroupSize=KChunkSize/KPageTableSize;
    iAsidGroupMask=iAsidGroupSize-1;
    iAsidGroupShift=iChunkShift-iGlobalPdShift;
    iAliasSize=KPageSize;
    iAliasMask=KPageMask;
    iAliasShift=KPageShift;
    iUserLocalBase=KUserLocalDataBase;
    iUserSharedBase=KUserSharedDataBase;
    iAsidInfo=(TUint32*)KAsidInfoBase;
    iPdeBase=KPageDirectoryBase;
    iPdPtePerm=KPdPtePerm|iPteGlobal;
    iPdPdePerm=KPdPdePerm;
    iRamDriveMask=0x00f00000;
    iGlobalCodePtePerm=KPdePermRORO|iPteGlobal;

    iMaxDllDataSize=Min(total_ram/2, 0x08000000);               // phys RAM/2 up to 128Mb
    iMaxDllDataSize=(iMaxDllDataSize+iChunkMask)&~iChunkMask;   // round up to chunk size
    iMaxUserCodeSize=Min(total_ram, 0x10000000);                // phys RAM up to 256Mb
    iMaxUserCodeSize=(iMaxUserCodeSize+iChunkMask)&~iChunkMask; // round up to chunk size
    iUserLocalEnd=iUserSharedBase-iMaxDllDataSize;
    iUserSharedEnd=KUserSharedDataEnd-iMaxUserCodeSize;
    iDllDataBase=iUserLocalEnd;
    iUserCodeBase=iUserSharedEnd;
    __KTRACE_OPT(KMMU,Kern::Printf("ULB %08x ULE %08x USB %08x USE %08x",iUserLocalBase,iUserLocalEnd,
                                                                        iUserSharedBase,iUserSharedEnd));
    __KTRACE_OPT(KMMU,Kern::Printf("DDB %08x UCB %08x",iDllDataBase,iUserCodeBase));

    // X86Mmu data

    // other
    PP::MaxUserThreadStack=0x14000;         // 80K - STDLIB asks for 64K for PosixServer!!!!
    PP::UserThreadStackGuard=0x2000;        // 8K
    PP::MaxStackSpacePerProcess=0x200000;   // 2Mb
    K::SupervisorThreadStackSize=0x1000;    // 4K
    PP::SupervisorThreadStackGuard=0x1000;  // 4K
    K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
    PP::RamDriveStartAddress=KRamDriveStartAddress;
    PP::RamDriveRange=KRamDriveMaxSize;
    PP::RamDriveMaxSize=KRamDriveMaxSize;   // may be reduced later
    K::MemModelAttributes=EMemModelTypeMultiple|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
                        EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt|
                        EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt;

#ifdef __SMP__
    ApTrampolinePage = KApTrampolinePageLin;

    TInt i;
    for (i=0; i<KMaxCpus; ++i)
        {
        TSubScheduler& ss = TheSubSchedulers[i];
        TLinAddr a = KIPCAlias + (i<<KChunkShift);
        ss.i_AliasLinAddr = (TAny*)a;
        ss.i_AliasPdePtr = (TAny*)(KPageDirectoryBase + (a>>KChunkShift)*sizeof(TPde));
        }
#endif

    Mmu::Init1();
    }
|
void X86Mmu::DoInit2()
    {
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("X86Mmu::DoInit2"));
    iTempPte=PageTable(PageTableId(iTempAddr,0))+((iTempAddr&KChunkMask)>>KPageShift);
    iSecondTempPte=PageTable(PageTableId(iSecondTempAddr,0))+((iSecondTempAddr&KChunkMask)>>KPageShift);
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iTempAddr=%08x, iTempPte=%08x, iSecondTempAddr=%08x, iSecondTempPte=%08x",
            iTempAddr, iTempPte, iSecondTempAddr, iSecondTempPte));
    CreateKernelSection(KKernelSectionEnd, iAliasShift);
    CreateUserGlobalSection(KUserGlobalDataBase, KUserGlobalDataEnd);
    iUserHwChunkAllocator=THwChunkAddressAllocator::New(0, iUserGlobalSection);
    __ASSERT_ALWAYS(iUserHwChunkAllocator, Panic(ECreateUserGlobalSectionFailed));
    Mmu::DoInit2();
    }
|
#ifndef __MMU_MACHINE_CODED__
void X86Mmu::MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm)
//
// Map a list of physical RAM pages into a specified page table with specified PTE permissions.
// Update the page information array.
// Call this with the system locked.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::MapRamPages() id=%d type=%d ptr=%08x off=%08x n=%d perm=%08x",
            aId, aType, aPtr, aOffset, aNumPages, aPtePerm));

    SPageTableInfo& ptinfo=iPtInfo[aId];
    ptinfo.iCount+=aNumPages;
    aOffset>>=KPageShift;
    TInt ptOffset=aOffset & KPagesInPDEMask;    // entry number in page table
    TPte* pPte=PageTable(aId)+ptOffset;         // address of first PTE
    while(aNumPages--)
        {
        TPhysAddr pa = *aPageList++;
        *pPte++ = pa | aPtePerm;                // insert PTE
        __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
        if (aType!=SPageInfo::EInvalid)
            {
            SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
            if(pi)
                {
                pi->Set(aType,aPtr,aOffset);
                __KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
                ++aOffset;                      // increment offset for next page
                }
            }
        }
    __DRAIN_WRITE_BUFFER;
    }
|
void X86Mmu::MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm)
//
// Map consecutive physical pages into a specified page table with specified PTE permissions.
// Update the page information array if RAM pages are being mapped.
// Call this with the system locked.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::MapPhysicalPages() id=%d type=%d ptr=%08x off=%08x phys=%08x n=%d perm=%08x",
            aId, aType, aPtr, aOffset, aPhysAddr, aNumPages, aPtePerm));
    SPageTableInfo& ptinfo=iPtInfo[aId];
    ptinfo.iCount+=aNumPages;
    aOffset>>=KPageShift;
    TInt ptOffset=aOffset & KPagesInPDEMask;    // entry number in page table
    TPte* pPte=(TPte*)(PageTableLinAddr(aId))+ptOffset; // address of first PTE
    SPageInfo* pi;
    if(aType==SPageInfo::EInvalid)
        pi = NULL;
    else
        pi = SPageInfo::SafeFromPhysAddr(aPhysAddr);
    while(aNumPages--)
        {
        *pPte++ = aPhysAddr|aPtePerm;           // insert PTE
        aPhysAddr+=KPageSize;
        __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
        if (pi)
            {
            pi->Set(aType,aPtr,aOffset);
            ++aOffset;                          // increment offset for next page
            __KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
            ++pi;
            }
        }
    __DRAIN_WRITE_BUFFER;
    }
|
void X86Mmu::MapVirtual(TInt /*aId*/, TInt /*aNumPages*/)
//
// Used in the implementation of demand paging - not supported on x86
//
    {
    MM::Panic(MM::EOperationNotSupported);
    }

void X86Mmu::RemapPage(TInt /*aId*/, TUint32 /*aAddr*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/, TPte /*aPtePerm*/, DProcess* /*aProcess*/)
    {
    MM::Panic(MM::EOperationNotSupported);
    }

void X86Mmu::RemapPageByAsid(TBitMapAllocator* /*aOsAsids*/, TLinAddr /*aLinAddr*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/, TPte /*aPtePerm*/)
    {
    MM::Panic(MM::EOperationNotSupported);
    }
|
TInt X86Mmu::UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess*)
//
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::UnmapPages() id=%d off=%08x n=%d pl=%08x set-free=%08x",aId,aAddr,aNumPages,aPageList,aSetPagesFree));
    TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;   // entry number in page table
    TPte* pPte=PageTable(aId)+ptOffset;             // address of first PTE
    TInt np=0;
    TInt nf=0;
#ifdef __SMP__
    TTLBIPI ipi;
#endif
    while(aNumPages--)
        {
        TPte pte=*pPte;     // get original PTE
        *pPte++=0;          // clear PTE
        if (pte & KPdePtePresent)
            {
#ifdef __SMP__
            ipi.AddAddress(aAddr);
#else
            InvalidateTLBForPage(aAddr);        // flush any corresponding TLB entry
#endif
            ++np;                               // count unmapped pages
            TPhysAddr pa=pte & KPdePtePhysAddrMask; // physical address of unmapped page
            if (aSetPagesFree)
                {
                SPageInfo* pi = SPageInfo::FromPhysAddr(pa);
                if(iRamCache->PageUnmapped(pi))
                    {
                    pi->SetUnused();            // mark page as unused
                    if (pi->LockCount()==0)
                        {
                        *aPageList++=pa;        // store in page list
                        ++nf;                   // count free pages
                        }
                    }
                }
            else
                *aPageList++=pa;                // store in page list
            }
        aAddr+=KPageSize;
        }
#ifdef __SMP__
    ipi.InvalidateList();
#endif
    aNumPtes=np;
    aNumFree=nf;
    SPageTableInfo& ptinfo=iPtInfo[aId];
    TInt r=(ptinfo.iCount-=np);
    __DRAIN_WRITE_BUFFER;
    __KTRACE_OPT(KMMU,Kern::Printf("Pages recovered %d Pages remaining %d NF=%d",np,r,nf));
    return r;                                   // return number of pages remaining in this page table
    }
|
TInt X86Mmu::UnmapUnownedPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TLinAddr* aLAPageList, TInt& aNumPtes, TInt& aNumFree, DProcess*)
//
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::UnmapUnownedPages() id=%d off=%08x n=%d pl=%08x",aId,aAddr,aNumPages,aPageList));
    TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;   // entry number in page table
    TPte* pPte=PageTable(aId)+ptOffset;             // address of first PTE
    TInt np=0;
    TInt nf=0;
#ifdef __SMP__
    TTLBIPI ipi;
#endif
    while(aNumPages--)
        {
        TPte pte=*pPte;     // get original PTE
        *pPte++=0;          // clear PTE
        if (pte & KPdePtePresent)
            {
#ifdef __SMP__
            ipi.AddAddress(aAddr);
#else
            InvalidateTLBForPage(aAddr);        // flush any corresponding TLB entry
#endif
            ++np;                               // count unmapped pages
            TPhysAddr pa=pte & KPdePtePhysAddrMask; // physical address of unmapped page

            nf++;
            *aPageList++=pa;                    // store in page list
            *aLAPageList++ = aAddr;
            }
        aAddr+=KPageSize;
        }
#ifdef __SMP__
    ipi.InvalidateList();
#endif
    aNumPtes=np;
    aNumFree=nf;
    SPageTableInfo& ptinfo=iPtInfo[aId];
    TInt r=(ptinfo.iCount-=np);
    __DRAIN_WRITE_BUFFER;
    __KTRACE_OPT(KMMU,Kern::Printf("Pages recovered %d Pages remaining %d NF=%d",np,r,nf));
    return r;                                   // return number of pages remaining in this page table
    }
|
TInt X86Mmu::UnmapVirtual(TInt /*aId*/, TUint32 /*aAddr*/, TInt /*aNumPages*/, TPhysAddr* /*aPageList*/, TBool /*aSetPagesFree*/, TInt& /*aNumPtes*/, TInt& /*aNumFree*/, DProcess* /*aProcess*/)
//
// Used in the implementation of demand paging - not supported on x86
//
    {
    MM::Panic(MM::EOperationNotSupported);
    return 0;   // keep compiler happy
    }

TInt X86Mmu::UnmapUnownedVirtual(TInt /*aId*/, TUint32 /*aAddr*/, TInt /*aNumPages*/, TPhysAddr* /*aPageList*/, TLinAddr* /*aLALinAddr*/, TInt& /*aNumPtes*/, TInt& /*aNumFree*/, DProcess* /*aProcess*/)
//
// Used in the implementation of demand paging - not supported on x86
//
    {
    MM::Panic(MM::EOperationNotSupported);
    return 0;   // keep compiler happy
    }
|
void X86Mmu::DoAssignPageTable(TInt aId, TLinAddr aAddr, TPde aPdePerm, const TAny* aOsAsids)
//
// Assign an allocated page table to map a given linear address with specified permissions.
// This should be called with the system unlocked and the MMU mutex held.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::DoAssignPageTable %d to %08x perm %08x asid %08x",aId,aAddr,aPdePerm,aOsAsids));
    TLinAddr ptLin=PageTableLinAddr(aId);
    TPhysAddr ptPhys=LinearToPhysical(ptLin,0);
    TInt pdeIndex=TInt(aAddr>>KChunkShift);
    TInt os_asid=(TInt)aOsAsids;
    if (TUint32(os_asid)<TUint32(iNumOsAsids))
        {
        // single OS ASID
        TPde* pageDir=PageDirectory(os_asid);
        NKern::LockSystem();
        pageDir[pdeIndex]=ptPhys|aPdePerm;
        NKern::UnlockSystem();
        __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
        }
    else
        {
        // selection of OS ASIDs or all OS ASIDs
        const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
        if (os_asid==-1)
            pB=iOsAsidAllocator;    // 0's in positions which exist
        TInt num_os_asids=pB->iSize-pB->iAvail;
        for (os_asid=0; num_os_asids; ++os_asid)
            {
            if (pB->NotAllocated(os_asid,1))
                continue;           // os_asid is not needed
            TPde* pageDir=PageDirectory(os_asid);
            NKern::LockSystem();
            pageDir[pdeIndex]=ptPhys|aPdePerm;
            NKern::UnlockSystem();
            __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
            --num_os_asids;
            }
        }
    __DRAIN_WRITE_BUFFER;
    }
|
void X86Mmu::RemapPageTableSingle(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, TInt aOsAsid)
    {
    MM::Panic(MM::EOperationNotSupported);
    }

void X86Mmu::RemapPageTableGlobal(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr)
    {
    MM::Panic(MM::EOperationNotSupported);
    }

void X86Mmu::RemapPageTableMultiple(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, const TAny* aOsAsids)
    {
    MM::Panic(MM::EOperationNotSupported);
    }

void X86Mmu::RemapPageTableAliases(TPhysAddr aOld, TPhysAddr aNew)
    {
    MM::Panic(MM::EOperationNotSupported);
    }
|
void X86Mmu::DoUnassignPageTable(TLinAddr aAddr, const TAny* aOsAsids)
//
// Unassign a now-empty page table currently mapping the specified linear address.
// We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
// This should be called with the system unlocked and the MMU mutex held.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::DoUnassignPageTable at %08x a=%08x",aAddr,aOsAsids));
    TInt pdeIndex=TInt(aAddr>>KChunkShift);
    TInt os_asid=(TInt)aOsAsids;
    TUint pde=0;

    SDblQue checkedList;
    SDblQueLink* next;

    if (TUint32(os_asid)<TUint32(iNumOsAsids))
        {
        // single OS ASID
        TPde* pageDir=PageDirectory(os_asid);
        NKern::LockSystem();
        pde = pageDir[pdeIndex];
        pageDir[pdeIndex]=0;
        __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));

        // remove any aliases of the page table...
        TUint ptId = pde>>KPageTableShift;
        while(!iAliasList.IsEmpty())
            {
            next = iAliasList.First()->Deque();
            checkedList.Add(next);
            DMemModelThread* thread = _LOFF(next, DMemModelThread, iAliasLink);
            if(thread->iAliasOsAsid==os_asid && (thread->iAliasPde>>KPageTableShift)==ptId)
                {
                // the page table is being aliased by the thread, so remove it...
                thread->iAliasPde = 0;
                }
            NKern::FlashSystem();
            }
        }
    else
        {
        // selection of OS ASIDs or all OS ASIDs
        const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
        if (os_asid==-1)
            pB=iOsAsidAllocator;    // 0's in positions which exist
        TInt num_os_asids=pB->iSize-pB->iAvail;
        for (os_asid=0; num_os_asids; ++os_asid)
            {
            if (pB->NotAllocated(os_asid,1))
                continue;           // os_asid is not needed
            TPde* pageDir=PageDirectory(os_asid);
            NKern::LockSystem();
            pde = pageDir[pdeIndex];
            pageDir[pdeIndex]=0;
            NKern::UnlockSystem();
            __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
            --num_os_asids;
            }

        // remove any aliases of the page table...
        TUint ptId = pde>>KPageTableShift;
        NKern::LockSystem();
        while(!iAliasList.IsEmpty())
            {
            next = iAliasList.First()->Deque();
            checkedList.Add(next);
            DMemModelThread* thread = _LOFF(next, DMemModelThread, iAliasLink);
            if((thread->iAliasPde>>KPageTableShift)==ptId && !pB->NotAllocated(thread->iAliasOsAsid,1))
                {
                // the page table is being aliased by the thread, so remove it...
                thread->iAliasPde = 0;
                }
            NKern::FlashSystem();
            }
        }

    // copy checkedList back to iAliasList
    iAliasList.MoveFrom(&checkedList);

    NKern::UnlockSystem();

    __DRAIN_WRITE_BUFFER;   // because page tables have been updated
    }
#endif
|
// Initialise page table at physical address aXptPhys to be used as page table aXptId
// to expand the virtual address range used for mapping page tables. Map the page table
// at aPhysAddr as page table aId using the expanded range.
// Assign aXptPhys to kernel's Page Directory.
// Called with system unlocked and MMU mutex held.
void X86Mmu::BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::BootstrapPageTable xptid=%04x, xptphys=%08x, id=%04x, phys=%08x",
            aXptId, aXptPhys, aId, aPhysAddr));

    // put in a temporary mapping for aXptPhys
    *iTempPte = aXptPhys | KPtPtePerm | iPteGlobal;
    __DRAIN_WRITE_BUFFER;

    // clear XPT
    TPte* xpt=(TPte*)iTempAddr;
    memclr(xpt, KPageSize);

    // map XPT
    xpt[aXptId & KPagesInPDEMask] = aXptPhys | KPtPtePerm | iPteGlobal;

    // map other page table
    xpt[aId & KPagesInPDEMask] = aPhysAddr | KPtPtePerm | iPteGlobal;

    // remove temporary mapping
    *iTempPte=0;
    __DRAIN_WRITE_BUFFER;
    InvalidateTLBForPage(iTempAddr);

    // initialise PtInfo...
    TLinAddr xptAddr = PageTableLinAddr(aXptId);
    iPtInfo[aXptId].SetGlobal(xptAddr>>KChunkShift);

    // map xpt...
    TInt pdeIndex=TInt(xptAddr>>KChunkShift);
    TPde* pageDir=PageDirectory(0);
    NKern::LockSystem();
    pageDir[pdeIndex]=aXptPhys|KPtPdePerm;
    __DRAIN_WRITE_BUFFER;
    NKern::UnlockSystem();
    }
|
void X86Mmu::FixupXPageTable(TInt aId, TLinAddr aTempMap, TPhysAddr aOld, TPhysAddr aNew)
    {
    MM::Panic(MM::EOperationNotSupported);
    }

TInt X86Mmu::NewPageDirectory(TInt aOsAsid, TBool aSeparateGlobal, TPhysAddr& aPhysAddr, TInt& aNumPages)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::NewPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal));
    TInt r=AllocRamPages(&aPhysAddr,1, EPageFixed);
    if (r!=KErrNone)
        return r;
#ifdef BTRACE_KERNEL_MEMORY
    BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
    Epoc::KernelMiscPages += 1;
#endif
    SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
    NKern::LockSystem();
    pi->SetPageDir(aOsAsid,0);
    NKern::UnlockSystem();
    aNumPages=1;
    return KErrNone;
    }
|
inline void CopyPdes(TPde* aDest, const TPde* aSrc, TLinAddr aBase, TLinAddr aEnd)
    {
    memcpy(aDest+(aBase>>KChunkShift), aSrc+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
    }

inline void ZeroPdes(TPde* aDest, TLinAddr aBase, TLinAddr aEnd)
    {
    memclr(aDest+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
    }

void X86Mmu::InitPageDirectory(TInt aOsAsid, TBool)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::InitPageDirectory(%d)",aOsAsid));
    TPde* newpd=PageDirectory(aOsAsid);                 // new page directory
    const TPde* kpd=(const TPde*)KPageDirectoryBase;    // kernel page directory
    ZeroPdes(newpd, 0x00000000, KUserSharedDataEnd);    // clear user mapping area
    ZeroPdes(newpd, KRamDriveStartAddress, KRamDriveEndAddress);    // don't copy RAM drive
    CopyPdes(newpd, kpd, KRomLinearBase, KUserGlobalDataEnd);       // copy ROM + user global
    CopyPdes(newpd, kpd, KRamDriveEndAddress, 0x00000000);          // copy kernel mappings
    __DRAIN_WRITE_BUFFER;
    }
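
// Note on the last CopyPdes call: aEnd==0x00000000 relies on 32-bit unsigned
// wrap-around, so (aEnd-aBase)>>KChunkShift counts the PDEs from
// KRamDriveEndAddress up to the 4GB boundary, i.e. every remaining kernel
// mapping in the directory.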
|
void X86Mmu::ClearPageTable(TInt aId, TInt aFirstIndex)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:ClearPageTable(%d,%d)",aId,aFirstIndex));
    TPte* pte=PageTable(aId);
    memclr(pte+aFirstIndex, KPageSize-aFirstIndex*sizeof(TPte));
    __DRAIN_WRITE_BUFFER;
    }

void X86Mmu::ApplyTopLevelPermissions(TLinAddr aAddr, TInt aOsAsid, TInt aNumPdes, TPde aPdePerm)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::ApplyTopLevelPermissions %04x:%08x->%08x count %d",
            aOsAsid, aAddr, aPdePerm, aNumPdes));
    TInt ix=aAddr>>KChunkShift;
    TPde* pPde=PageDirectory(aOsAsid)+ix;
    TPde* pPdeEnd=pPde+aNumPdes;
    NKern::LockSystem();
    for (; pPde<pPdeEnd; ++pPde)
        {
        TPde pde=*pPde;
        if (pde)
            *pPde = (pde&KPdePtePhysAddrMask)|aPdePerm;
        }
    NKern::UnlockSystem();
    (aAddr>=KUserSharedDataEnd) ? InvalidateTLB() : LocalInvalidateTLB();
    __DRAIN_WRITE_BUFFER;
    }

void X86Mmu::ApplyPagePermissions(TInt aId, TInt aPageOffset, TInt aNumPages, TPte aPtePerm)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::ApplyPagePermissions %04x:%03x+%03x perm %08x",
            aId, aPageOffset, aNumPages, aPtePerm));
    TPte* pPte=PageTable(aId)+aPageOffset;
    TPte* pPteEnd=pPte+aNumPages;
    TPte g=0;
    NKern::LockSystem();
    for (; pPte<pPteEnd; ++pPte)
        {
        TPte pte=*pPte;
        g |= pte;
        if (pte)
            *pPte = (pte&KPdePtePhysAddrMask)|aPtePerm;
        }
    NKern::UnlockSystem();
    (g & KPdePteGlobal) ? InvalidateTLB() : LocalInvalidateTLB();
    __DRAIN_WRITE_BUFFER;
    }
|
// Set up a page table (specified by aId) to map a 4Mb section of ROM containing aRomAddr
// using ROM at aOrigPhys.
void X86Mmu::InitShadowPageTable(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
    {
    (void)aId, (void)aRomAddr, (void)aOrigPhys;
    FAULT();    // Never used
/*
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:InitShadowPageTable id=%04x aRomAddr=%08x aOrigPhys=%08x",
            aId, aRomAddr, aOrigPhys));
    TPte* ppte = PageTable(aId);
    TPte* ppte_End = ppte + KChunkSize/KPageSize;
    TPhysAddr phys = aOrigPhys - (aRomAddr & KChunkMask);
    for (; ppte<ppte_End; ++ppte, phys+=KPageSize)
        *ppte = phys | KRomPtePerm;
    __DRAIN_WRITE_BUFFER;
*/
    }
|
// Copy the contents of ROM at aRomAddr to a shadow page at physical address aShadowPhys
void X86Mmu::InitShadowPage(TPhysAddr aShadowPhys, TLinAddr aRomAddr)
    {
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:InitShadowPage aShadowPhys=%08x aRomAddr=%08x",
            aShadowPhys, aRomAddr));

    // put in a temporary mapping for aShadowPhys
    // make it noncacheable
    *iTempPte = aShadowPhys | KPtPtePerm | iPteGlobal;
    __DRAIN_WRITE_BUFFER;

    // copy contents of ROM
    wordmove( (TAny*)iTempAddr, (const TAny*)aRomAddr, KPageSize );
    __DRAIN_WRITE_BUFFER;   // make sure contents are written to memory

    // remove temporary mapping
    *iTempPte=0;
    __DRAIN_WRITE_BUFFER;
    InvalidateTLBForPage(iTempAddr);
    }
|
// Assign a shadow page table to replace a ROM section mapping
// Enter and return with system locked
void X86Mmu::AssignShadowPageTable(TInt aId, TLinAddr aRomAddr)
    {
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:AssignShadowPageTable aId=%04x aRomAddr=%08x",
            aId, aRomAddr));
    TLinAddr ptLin=PageTableLinAddr(aId);
    TPhysAddr ptPhys=LinearToPhysical(ptLin, 0);
    TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift);
    TPde newpde = ptPhys | KShadowPdePerm;
    __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
#ifdef __SMP__
    TTLBIPI ipi;
    NKern::Lock();          // stop other processors passing this point
    ShadowSpinLock.LockOnly();
    ipi.QueueAllOther(&TTLBIPI::WaitAndInvalidateIsr);
    ipi.WaitEntry();        // wait for other processors to stop in the ISR
#endif
    TInt irq=NKern::DisableAllInterrupts();
    *ppde = newpde;         // map in the page table
    __DRAIN_WRITE_BUFFER;   // make sure new PDE written to main memory
    DoInvalidateTLB();      // completely flush TLB
    NKern::RestoreInterrupts(irq);
#ifdef __SMP__
    ipi.iFlag = 1;          // release other processors so they can flush their TLBs
    ipi.WaitCompletion();   // wait for other processors to flush their TLBs
    ShadowSpinLock.UnlockOnly();
    NKern::Unlock();
#endif
    }
|
void X86Mmu::DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:DoUnmapShadowPage, id=%04x lin=%08x origphys=%08x", aId, aRomAddr, aOrigPhys));
    TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
    TPte newpte = aOrigPhys | KRomPtePerm;
    __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
#ifdef __SMP__
    TTLBIPI ipi;
    ipi.AddAddress(aRomAddr);
    NKern::Lock();          // stop other processors passing this point
    ShadowSpinLock.LockOnly();
    ipi.QueueAllOther(&TTLBIPI::WaitAndInvalidateIsr);
    ipi.WaitEntry();        // wait for other processors to stop
#endif
    TInt irq=NKern::DisableAllInterrupts();
    *ppte = newpte;
    __DRAIN_WRITE_BUFFER;
    DoInvalidateTLBForPage(aRomAddr);
    NKern::RestoreInterrupts(irq);
#ifdef __SMP__
    ipi.iFlag = 1;          // release other processors so they can flush their TLBs
    ipi.WaitCompletion();   // wait for other processors to flush their TLBs
    ShadowSpinLock.UnlockOnly();
    NKern::Unlock();
#endif
    }

TInt X86Mmu::UnassignShadowPageTable(TLinAddr /*aRomAddr*/, TPhysAddr /*aOrigPhys*/)
    {
    // not used since we use page mappings for the ROM
    return KErrGeneral;
    }
|
TInt X86Mmu::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
    {
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:CopyToShadowMemory aDest=%08x aSrc=%08x aLength=%08x", aDest, aSrc, aLength));

    // Check that destination is ROM
    if (aDest<iRomLinearBase || (aDest+aLength) > iRomLinearEnd)
        {
        __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:CopyToShadowMemory: Destination not entirely in ROM"));
        return KErrArgument;
        }

    // do operation with RamAlloc mutex held (to prevent shadow pages from being released from under us)
    Kern::MutexWait(*RamAllocatorMutex);

    TInt r = KErrNone;
    while (aLength)
        {
        // Calculate memory size to copy in this loop. A single page region will be copied per loop
        TInt copySize = Min(aLength, iPageSize - (aDest&iPageMask));

        // Get physical address
        TPhysAddr physAddr = LinearToPhysical(aDest&~iPageMask, 0);
        if (KPhysAddrInvalid==physAddr)
            {
            r = KErrArgument;
            break;
            }

        // check whether it is shadowed ROM
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr);
        if (pi==0 || pi->Type()!=SPageInfo::EShadow)
            {
            __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:CopyToShadowMemory: No shadow page at this address"));
            r = KErrArgument;
            break;
            }

        // Temporarily map into writable memory and copy data. RamAllocator DMutex is required
        TLinAddr tempAddr = MapTemp (physAddr, aDest&~iPageMask);
        __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:CopyToShadowMemory Copy aDest=%08x aSrc=%08x aSize=%08x", tempAddr+(aDest&iPageMask), aSrc, copySize));
        memcpy ((TAny*)(tempAddr+(aDest&iPageMask)), (const TAny*)aSrc, copySize);  // Kernel-to-Kernel copy is presumed
        UnmapTemp();

        // Update variables for the next loop/page
        aDest+=copySize;
        aSrc+=copySize;
        aLength-=copySize;
        }

    Kern::MutexSignal(*RamAllocatorMutex);
    return r;
    }
|
void X86Mmu::DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr)
    {
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:DoFreezeShadowPage aId=%04x aRomAddr=%08x",
            aId, aRomAddr));
    TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
    TPte newpte = (*ppte & KPdePtePhysAddrMask) | KRomPtePerm;
    __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
    *ppte = newpte;
    __DRAIN_WRITE_BUFFER;
    InvalidateTLBForPage(aRomAddr);
    }

void X86Mmu::FlushShadow(TLinAddr aRomAddr)
    {
#ifdef __SMP__
    TTLBIPI ipi;
    ipi.AddAddress(aRomAddr);
    NKern::Lock();          // stop other processors passing this point
    ShadowSpinLock.LockOnly();
    ipi.QueueAllOther(&TTLBIPI::WaitAndInvalidateIsr);
    ipi.WaitEntry();        // wait for other processors to stop
    DoInvalidateTLBForPage(aRomAddr);
    ipi.iFlag = 1;          // release other processors so they can flush their TLBs
    ipi.WaitCompletion();   // wait for other processors to flush their TLBs
    ShadowSpinLock.UnlockOnly();
    NKern::Unlock();
#else
    InvalidateTLBForPage(aRomAddr);     // remove all TLB references to original ROM page
#endif
    }
|
void X86Mmu::Pagify(TInt aId, TLinAddr aLinAddr)
    {
    // Nothing to do on x86
    }

void X86Mmu::ClearRamDrive(TLinAddr aStart)
    {
    // clear the page directory entries corresponding to the RAM drive
    TPde* kpd=(TPde*)KPageDirectoryBase;    // kernel page directory
    ZeroPdes(kpd, aStart, KRamDriveEndAddress);
    __DRAIN_WRITE_BUFFER;
    }

// Generic cache/TLB flush function.
// Which things are flushed is determined by aMask.
void X86Mmu::GenericFlush(TUint32 aMask)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("GenericFlush %x",aMask));
    if (aMask&(EFlushDPermChg|EFlushIPermChg))
        InvalidateTLB();
    }

TPde X86Mmu::PdePermissions(TChunkType aChunkType, TBool aRO)
    {
    if (aChunkType==EUserData && aRO)
        return KPdePtePresent|KPdePteUser;
    return ChunkPdePermissions[aChunkType];
    }

TPte X86Mmu::PtePermissions(TChunkType aChunkType)
    {
    TPte pte=ChunkPtePermissions[aChunkType];
    return (pte&~KPdePteGlobal)|(pte&iPteGlobal);
    }
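
// Note: the expression above strips KPdePteGlobal when the CPU lacks PGE
// (Init1 set iPteGlobal to 0 in that case) and leaves the PTE unchanged when
// PGE is supported, since iPteGlobal is either 0 or KPdePteGlobal.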
|
const TUint FBLK=(EMapAttrFullyBlocking>>12);
const TUint BFNC=(EMapAttrBufferedNC>>12);
const TUint BUFC=(EMapAttrBufferedC>>12);
const TUint L1UN=(EMapAttrL1Uncached>>12);
const TUint WTRA=(EMapAttrCachedWTRA>>12);
const TUint WTWA=(EMapAttrCachedWTWA>>12);
const TUint WBRA=(EMapAttrCachedWBRA>>12);
const TUint WBWA=(EMapAttrCachedWBWA>>12);
const TUint AWTR=(EMapAttrAltCacheWTRA>>12);
const TUint AWTW=(EMapAttrAltCacheWTWA>>12);
const TUint AWBR=(EMapAttrAltCacheWBRA>>12);
const TUint AWBW=(EMapAttrAltCacheWBWA>>12);

const TUint16 UNS=0xffffu;  // Unsupported attribute
const TUint16 SPE=0xfffeu;  // Special processing required

static const TUint16 CacheBuffAttributes[16]=
    {0x10,0x10,0x10,0x10,0x08,0x08,0x00,0x00, UNS, UNS, UNS, UNS, UNS, UNS, UNS,0x00};
static const TUint8 CacheBuffActual[16]=
    {FBLK,FBLK,FBLK,FBLK,WTRA,WTRA,WBWA,WBWA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WBWA};

static const TUint8 ActualReadPrivilegeLevel[4]={1,1,4,4};  // RONO,RWNO,RORO,RWRW
static const TUint8 ActualWritePrivilegeLevel[4]={0,1,0,4}; // RONO,RWNO,RORO,RWRW
|
TInt X86Mmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
    {
    __KTRACE_OPT(KMMU,Kern::Printf(">X86Mmu::PdePtePermissions, mapattr=%08x",aMapAttr));
    TUint read=aMapAttr & EMapAttrReadMask;
    TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
    TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
    TUint cache=(aMapAttr & EMapAttrL1CacheMask)>>12;
    TPte pte;
    // ignore L2 cache attributes for now - downgrade to L2 uncached

    // if execute access is greater than read, adjust read (since there are no separate execute permissions on X86)
    if (exec>read)
        read=exec;
    pte=0;
    if (write==0)
        {
        // read-only
        if (read>=4)
            pte=KPdePermRORO;       // user and supervisor read-only
        else
            pte=KPdePermRONO;       // supervisor r/o user no access
        }
    else if (write<4)
        {
        // only supervisor can write
        if (read>=4)
            pte=KPdePermRWRW;       // full access since no RWRO
        else
            pte=KPdePermRWNO;       // sup rw user no access
        }
    else
        pte=KPdePermRWRW;           // sup rw user rw
    read=ActualReadPrivilegeLevel[pte>>1];
    write=ActualWritePrivilegeLevel[pte>>1];
    TUint cbatt=CacheBuffAttributes[cache];
    TInt r=KErrNone;
    if (cbatt==UNS)
        r=KErrNotSupported;
    if (r==KErrNone)
        {
        cache=CacheBuffActual[cache];
        aPde=KPdePtePresent|KPdePteWrite|KPdePteUser;
        aPte=pte|cbatt|iPteGlobal;  // HW chunks can always be global
        aMapAttr=read|(write<<4)|(read<<8)|(cache<<12);
        }
    __KTRACE_OPT(KMMU,Kern::Printf("<X86Mmu::PdePtePermissions, r=%d, mapattr=%08x, pde=%08x, pte=%08x",
            r,aMapAttr,aPde,aPte));
    return r;
    }
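
// Worked example (illustrative): full user read/write, write-back cached
// (read=4, write=4, cache index WBWA) selects KPdePermRWRW with cbatt 0x00,
// so the returned PTE is Present|Write|User (|Global when PGE is available)
// and aMapAttr reports user-level read, write and execute.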
|
THwChunkAddressAllocator* X86Mmu::MappingRegion(TUint aMapAttr)
    {
    TUint read=aMapAttr & EMapAttrReadMask;
    TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
    TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
    if (read>=4 || write>=4 || exec>=4)
        return iUserHwChunkAllocator;   // if any access in user mode, must put it in user global section
    return iHwChunkAllocator;
    }
|
void X86Mmu::Map(TLinAddr aLinAddr, TPhysAddr aPhysAddr, TInt aSize, TPde aPdePerm, TPte aPtePerm, TInt aMapShift)
//
// Map a region of physical addresses aPhysAddr to aPhysAddr+aSize-1 to virtual address aLinAddr.
// Use permissions specified by aPdePerm and aPtePerm. Use mapping sizes up to and including (1<<aMapShift).
// Assume any page tables required are already assigned.
// aLinAddr, aPhysAddr, aSize must be page-aligned.
//
    {
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu::Map lin=%08x phys=%08x size=%08x", aLinAddr, aPhysAddr, aSize));
    __KTRACE_OPT(KMMU, Kern::Printf("pde=%08x pte=%08x mapshift=%d", aPdePerm, aPtePerm, aMapShift));
    TPde lp_pde=aPtePerm|KPdeLargePage;
    TLinAddr la=aLinAddr;
    TPhysAddr pa=aPhysAddr;
    TInt remain=aSize;
    while (remain)
        {
        if (aMapShift>=KChunkShift && (la & KChunkMask)==0 && remain>=KChunkSize)
            {
            // use large pages
            TInt npdes=remain>>KChunkShift;
            const TBitMapAllocator& b=*iOsAsidAllocator;
            TInt num_os_asids=b.iSize-b.iAvail;
            TInt os_asid=0;
            for (; num_os_asids; ++os_asid)
                {
                if (b.NotAllocated(os_asid,1))
                    continue;       // os_asid is not needed
                TPde* p_pde=PageDirectory(os_asid)+(la>>KChunkShift);
                TPde* p_pde_E=p_pde+npdes;
                TPde pde=pa|lp_pde;
                NKern::LockSystem();
                for (; p_pde < p_pde_E; pde+=KChunkSize)
                    {
                    __ASSERT_DEBUG(*p_pde==0, MM::Panic(MM::EPdeAlreadyInUse));
                    __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, p_pde));
                    *p_pde++=pde;
                    }
                NKern::UnlockSystem();
                --num_os_asids;
                }
            npdes<<=KChunkShift;
            la+=npdes, pa+=npdes, remain-=npdes;
            continue;
            }
        // use normal pages
        TInt block_size = Min(remain, KChunkSize-(la&KChunkMask));
        TInt id=PageTableId(la, 0);
        __ASSERT_DEBUG(id>=0, MM::Panic(MM::EMmuMapNoPageTable));
        TPte* p_pte=PageTable(id)+((la&KChunkMask)>>KPageShift);
        TPte* p_pte_E = p_pte + (block_size>>KPageShift);
        TPte pte=pa|aPtePerm;
        SPageTableInfo& ptinfo=iPtInfo[id];
        NKern::LockSystem();
        for (; p_pte < p_pte_E; pte+=KPageSize)
            {
            __ASSERT_DEBUG(*p_pte==0, MM::Panic(MM::EPteAlreadyInUse));
            __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", pte, p_pte));
            *p_pte++=pte;
            ++ptinfo.iCount;
            NKern::FlashSystem();
            }
        NKern::UnlockSystem();
        la+=block_size, pa+=block_size, remain-=block_size;
        }
    }
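
// Sizing example (illustrative): with aMapShift>=KChunkShift (PSE available),
// an aligned 8MB request is satisfied by two 4MB large-page PDEs; an unaligned
// or smaller request falls through to 4K PTEs, at most one chunk per iteration.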
|
void X86Mmu::Unmap(TLinAddr aLinAddr, TInt aSize)
//
// Remove all mappings in the specified range of addresses.
// Don't free page tables.
// aLinAddr, aSize must be page-aligned.
//
    {
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu::Unmap lin=%08x size=%08x", aLinAddr, aSize));
#ifdef __SMP__
    TTLBIPI ipi;
#endif
    TLinAddr a=aLinAddr;
    TLinAddr end=a+aSize;
    __KTRACE_OPT(KMMU,Kern::Printf("a=%08x end=%08x",a,end));
    NKern::LockSystem();
    while(a!=end)
        {
        TInt pdeIndex=a>>KChunkShift;
        TLinAddr next=(pdeIndex<<KChunkShift)+KChunkSize;
        TInt to_do=Min(TInt(end-a), TInt(next-a))>>KPageShift;
        __KTRACE_OPT(KMMU,Kern::Printf("a=%08x next=%08x to_do=%d",a,next,to_do));
        TPde pde=::InitPageDirectory[pdeIndex];
        if ( (pde&(KPdePtePresent|KPdeLargePage))==(KPdePtePresent|KPdeLargePage) )
            {
            __ASSERT_DEBUG(!(a&KChunkMask), MM::Panic(MM::EUnmapBadAlignment));
            ::InitPageDirectory[pdeIndex]=0;
#ifdef __SMP__
            ipi.AddAddress(a);
#else
            InvalidateTLBForPage(a);        // flush any corresponding TLB entry
#endif
            a=next;
            NKern::FlashSystem();
            continue;
            }
        TInt ptid=PageTableId(a,0);
        if (ptid>=0)
            {
            SPageTableInfo& ptinfo=iPtInfo[ptid];
            TPte* ppte=PageTable(ptid)+((a&KChunkMask)>>KPageShift);
            TPte* ppte_End=ppte+to_do;
            for (; ppte<ppte_End; ++ppte, a+=KPageSize)
                {
                if (*ppte & KPdePtePresent)
                    --ptinfo.iCount;
                *ppte=0;
#ifdef __SMP__
                ipi.AddAddress(a);
#else
                InvalidateTLBForPage(a);    // flush any corresponding TLB entry
#endif
                NKern::FlashSystem();
                }
            }
        else
            a += (to_do<<KPageShift);
        }
#ifdef __SMP__
    ipi.InvalidateList();
#endif
    NKern::UnlockSystem();
    }
|
1514 |
|
1515 |
|
void X86Mmu::ClearPages(TInt aNumPages, TPhysAddr* aPageList, TUint8 aClearByte)
	{
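	// Each entry of aPageList is normally a physical page address, but if bit 0
	// of the list pointer is set the pointer itself encodes a contiguous
	// physical run: it holds the current physical address directly and is
	// advanced one page per iteration.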
|
	// map the pages at a temporary address, clear them and unmap
	__ASSERT_MUTEX(RamAllocatorMutex);
	while (--aNumPages >= 0)
		{
		TPhysAddr pa;
		if((TInt)aPageList&1)
			{
			pa = (TPhysAddr)aPageList&~1;
			*(TPhysAddr*)&aPageList += iPageSize;
			}
		else
			pa = *aPageList++;
		*iTempPte = pa | KPdePtePresent | KPdePteWrite | iPteGlobal;
		__DRAIN_WRITE_BUFFER;
		InvalidateTLBForPage(iTempAddr);
		memset((TAny*)iTempAddr, aClearByte, iPageSize);
		}
	*iTempPte=0;
	__DRAIN_WRITE_BUFFER;
	InvalidateTLBForPage(iTempAddr);
	}
|
TLinAddr X86Mmu::MapTemp(TPhysAddr aPage,TLinAddr /*aLinAddr*/,TInt aPages)
	{
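	// Map up to four consecutive physical pages at the fixed kernel window
	// iTempAddr so they can be accessed regardless of the current process.
	// The window must be free on entry and is released by UnmapTemp();
	// MapSecondTemp() provides an independent second window.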
|
	__ASSERT_MUTEX(RamAllocatorMutex);
	__ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
	__ASSERT_DEBUG(aPages<=4,MM::Panic(MM::ETempMappingNoRoom));
	iTempMapCount = aPages;
	for (TInt i=0; i<aPages; i++)
		{
		iTempPte[i] = ((aPage&~KPageMask)+(i<<KPageShift)) | KPdePtePresent | KPdePteWrite | iPteGlobal;
		__DRAIN_WRITE_BUFFER;
		InvalidateTLBForPage(iTempAddr+(i<<KPageShift));
		}
	return iTempAddr;
	}
|
TLinAddr X86Mmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages, TMemoryType)
	{
	return MapTemp(aPage, aLinAddr, aPages);
	}

TLinAddr X86Mmu::MapSecondTemp(TPhysAddr aPage,TLinAddr /*aLinAddr*/,TInt aPages)
	{
	__ASSERT_MUTEX(RamAllocatorMutex);
	__ASSERT_DEBUG(!*iSecondTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
	__ASSERT_DEBUG(aPages<=4,MM::Panic(MM::ETempMappingNoRoom));
	iSecondTempMapCount = aPages;
	for (TInt i=0; i<aPages; i++)
		{
		iSecondTempPte[i] = ((aPage&~KPageMask)+(i<<KPageShift)) | KPdePtePresent | KPdePteWrite | iPteGlobal;
		__DRAIN_WRITE_BUFFER;
		InvalidateTLBForPage(iSecondTempAddr+(i<<KPageShift));
		}
	return iSecondTempAddr;
	}
|
void X86Mmu::UnmapTemp()
	{
	__ASSERT_MUTEX(RamAllocatorMutex);
	for (TInt i=0; i<iTempMapCount; i++)
		{
		iTempPte[i] = 0;
		__DRAIN_WRITE_BUFFER;
		InvalidateTLBForPage(iTempAddr+(i<<KPageShift));
		}
	}

void X86Mmu::UnmapSecondTemp()
	{
	__ASSERT_MUTEX(RamAllocatorMutex);
	for (TInt i=0; i<iSecondTempMapCount; i++)
		{
		iSecondTempPte[i] = 0;
		__DRAIN_WRITE_BUFFER;
		InvalidateTLBForPage(iSecondTempAddr+(i<<KPageShift));
		}
	}
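
// On this memory model the RAM drive needs no lock/unlock handling, so the
// entry points below are no-ops.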
|
void ExecHandler::UnlockRamDrive()
	{
	}

EXPORT_C void TInternalRamDrive::Unlock()
	{
	}

EXPORT_C void TInternalRamDrive::Lock()
	{
	}
|
TBool X86Mmu::ValidateLocalIpcAddress(TLinAddr aAddr,TInt aSize,TBool aWrite)
	{
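	// Two checks are made. First, an address inside the IPC alias region is
	// never valid here: the thread's alias is removed and the address is then
	// touched so the resulting exception is taken with the alias gone. Second,
	// the range is checked against the process's address check mask, which
	// holds one bit per 128MB slice of the address space (address bits 31:27).
	// For example aAddr=0x10000000 and end=0x17ffffff both lie in slice 2, so
	// the required mask is (2u<<2)-(1u<<2) = 0x04, i.e. just bit 2.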
|
	__NK_ASSERT_DEBUG(aSize<=KChunkSize);
	TLinAddr end = aAddr+aSize-1;
	if(end<aAddr)
		end = ~0u;

	if(TUint(aAddr^KIPCAlias)<TUint(KChunkSize) || TUint(end^KIPCAlias)<TUint(KChunkSize))
		{
		// local address is in alias region.
		// remove alias...
		NKern::LockSystem();
		((DMemModelThread*)TheCurrentThread)->RemoveAlias();
		NKern::UnlockSystem();
		// access memory, which will cause an exception...
		if(!(TUint(aAddr^KIPCAlias)<TUint(KChunkSize)))
			aAddr = end;
		DoInvalidateTLBForPage(aAddr);	// only need to do this processor since alias range is owned by the thread
		if(aWrite)
			*(volatile TUint8*)aAddr = 0;
		else
			aWrite = *(volatile TUint8*)aAddr;
		// can't get here
		__NK_ASSERT_DEBUG(0);
		}

	TUint32 local_mask;
	DMemModelProcess* process=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	if(aWrite)
		local_mask = process->iAddressCheckMaskW;
	else
		local_mask = process->iAddressCheckMaskR;
	TUint32 mask = 2u<<(end>>27);	// unsigned so the shift is well-defined for top-of-memory addresses
	mask -= 1u<<(aAddr>>27);
	if((local_mask&mask)!=mask)
		return EFalse;

	return ETrue;
	}
|
TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TInt aPerm, TLinAddr& aAliasAddr, TInt& aAliasSize)
//
// Set up an alias mapping starting at address aAddr in the specified process.
// Check permissions aPerm.
// Enter and return with system locked.
// Note: the alias is removed if an exception is trapped by DThread::IpcExcHandler.
//
	{
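	// An alias copies the PDE covering aAddr in aProcess into the current
	// address space at the IPC alias region, letting this thread reach another
	// process's memory without an address space switch. Only one PDE (4MB) is
	// aliased at a time, and on SMP the thread is frozen onto its current CPU
	// while the alias exists because each CPU has its own alias area.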
|
	__KTRACE_OPT(KMMU2,Kern::Printf("Thread %O Alias %08x+%x Process %O perm %x",this,aAddr,aSize,aProcess,aPerm));
	__ASSERT_SYSTEM_LOCK;

	if(TUint(aAddr^KIPCAlias)<TUint(KIPCAliasAreaSize))
		return KErrBadDescriptor;	// prevent access to alias region

	// check if memory is in region which is safe to access with supervisor permissions...
	TBool okForSupervisorAccess = aPerm&(EMapAttrReadSup|EMapAttrWriteSup) ? 1 : 0;
	if(!okForSupervisorAccess)
		{
		if(aAddr>=0xc0000000)		// address in kernel area (top 1GB)?
			return KErrBadDescriptor;	// don't have permission
		TUint32 local_mask;
		if(aPerm&EMapAttrWriteUser)
			local_mask = aProcess->iAddressCheckMaskW;
		else
			local_mask = aProcess->iAddressCheckMaskR;
		okForSupervisorAccess = (local_mask>>(aAddr>>27))&1;
		}

	if(aAddr>=KUserSharedDataEnd)	// if address is in global section, don't bother aliasing it...
		{
		if(iAliasLinAddr)
			RemoveAlias();
		aAliasAddr = aAddr;
		TInt maxSize = KChunkSize-(aAddr&KChunkMask);
		aAliasSize = aSize<maxSize ? aSize : maxSize;
		return okForSupervisorAccess;
		}

	TInt asid = aProcess->iOsAsid;
	TPde* pd = PageDirectory(asid);
	TPde pde = pd[aAddr>>KChunkShift];
#ifdef __SMP__
	TLinAddr aliasAddr;
#else
	TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
#endif
	if(pde==iAliasPde && iAliasLinAddr)
		{
		// pde already aliased, so just update linear address...
#ifdef __SMP__
		__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
		aliasAddr = iAliasLinAddr & ~KChunkMask;
		aliasAddr |= (aAddr & (KChunkMask & ~KPageMask));
#endif
		iAliasLinAddr = aliasAddr;
		}
	else
		{
		// alias PDE changed...
		if(!iAliasLinAddr)
			{
			::TheMmu.iAliasList.Add(&iAliasLink);	// add to list if not already aliased
#ifdef __SMP__
			__NK_ASSERT_DEBUG(iCpuRestoreCookie==-1);
			iCpuRestoreCookie = NKern::FreezeCpu();	// temporarily lock current thread to this processor
#endif
			}
		iAliasPde = pde;
		iAliasOsAsid = asid;
#ifdef __SMP__
		TSubScheduler& ss = SubScheduler();		// OK since we are locked to this CPU
		aliasAddr = TLinAddr(ss.i_AliasLinAddr) + (aAddr & (KChunkMask & ~KPageMask));
		iAliasPdePtr = (TPde*)(TLinAddr(ss.i_AliasPdePtr) + (((DMemModelProcess*)iOwningProcess)->iOsAsid << KPageTableShift));
#endif
		iAliasLinAddr = aliasAddr;
		}
	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, iAliasPdePtr));
	*iAliasPdePtr = pde;
	__DRAIN_WRITE_BUFFER;
	DoInvalidateTLBForPage(aliasAddr);	// only need to do this processor
	TInt offset = aAddr&KPageMask;
	aAliasAddr = aliasAddr | offset;
	TInt maxSize = KPageSize - offset;
	aAliasSize = aSize<maxSize ? aSize : maxSize;
	return okForSupervisorAccess;
	}
|
void DMemModelThread::RemoveAlias()
//
// Remove the alias mapping (if present).
// Enter and return with system locked.
//
	{
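	// Clear the thread's alias state before removing the PDE, then flush the
	// stale TLB entry on this processor only: on SMP no other CPU can hold a
	// TLB entry for the alias because the thread was frozen to this CPU in
	// Alias(), and that freeze is released here.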
|
	__KTRACE_OPT(KMMU2,Kern::Printf("Thread %O RemoveAlias", this));
	__ASSERT_SYSTEM_LOCK;
	TLinAddr addr = iAliasLinAddr;
	if(addr)
		{
		iAliasLinAddr = 0;
		iAliasPde = 0;
		__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x", iAliasPdePtr));
		*iAliasPdePtr = 0;
		__DRAIN_WRITE_BUFFER;
		DoInvalidateTLBForPage(addr);	// only need to do it for this processor
		iAliasLink.Deque();
#ifdef __SMP__
		__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
		NKern::EndFreezeCpu(iCpuRestoreCookie);
		iCpuRestoreCookie = -1;
#endif
		}
	}
|
void X86Mmu::CacheMaintenanceOnDecommit(TPhysAddr)
	{
	// no cache operations required on freeing memory
	}

void X86Mmu::CacheMaintenanceOnDecommit(const TPhysAddr*, TInt)
	{
	// no cache operations required on freeing memory
	}

void X86Mmu::CacheMaintenanceOnPreserve(TPhysAddr, TUint)
	{
	// no cache operations required on preserving memory
	}

void X86Mmu::CacheMaintenanceOnPreserve(const TPhysAddr*, TInt, TUint)
	{
	// no cache operations required on preserving memory
	}

void X86Mmu::CacheMaintenanceOnPreserve(TPhysAddr, TInt, TLinAddr, TUint)
	{
	// no cache operations required on preserving memory
	}
|
TInt X86Mmu::UnlockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
	{
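	// Walk the page tables covering the region and donate each still-present
	// page to the paging cache. At most KMaxPages PTEs are processed between
	// NKern::FlashSystem() calls so the system lock is never held for too
	// long; the outer loop re-derives the page table pointer after the lock
	// has been released or a chunk boundary is crossed.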
|
	TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
	TInt page = aLinAddr>>KPageShift;
	NKern::LockSystem();
	for(;;)
		{
		TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
		TPte* pt = SafePageTableFromPde(*pd++);
		__NK_ASSERT_DEBUG(pt);
		TInt pteIndex = page&(KChunkMask>>KPageShift);
		pt += pteIndex;
		do
			{
			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
			if(pagesInPt>aNumPages)
				pagesInPt = aNumPages;
			if(pagesInPt>KMaxPages)
				pagesInPt = KMaxPages;

			aNumPages -= pagesInPt;
			page += pagesInPt;

			do
				{
				TPte pte = *pt++;
				if(pte)	// pte may be null if page has already been unlocked and reclaimed by system
					iRamCache->DonateRamCachePage(SPageInfo::FromPhysAddr(pte));
				}
			while(--pagesInPt);

			if(!aNumPages)
				{
				NKern::UnlockSystem();
				return KErrNone;
				}

			pteIndex = page&(KChunkMask>>KPageShift);
			}
		while(!NKern::FlashSystem() && pteIndex);
		}
	}
|
TInt X86Mmu::LockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
	{
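	// The converse of UnlockRamCachePages(): reclaim every page in the region
	// from the paging cache. If any page is no longer present it has already
	// been paged out, so the whole operation fails with KErrNotFound.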
|
	TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
	TInt page = aLinAddr>>KPageShift;
	NKern::LockSystem();
	for(;;)
		{
		TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
		TPte* pt = SafePageTableFromPde(*pd++);
		__NK_ASSERT_DEBUG(pt);
		TInt pteIndex = page&(KChunkMask>>KPageShift);
		pt += pteIndex;
		do
			{
			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
			if(pagesInPt>aNumPages)
				pagesInPt = aNumPages;
			if(pagesInPt>KMaxPages)
				pagesInPt = KMaxPages;

			aNumPages -= pagesInPt;
			page += pagesInPt;

			do
				{
				TPte pte = *pt++;
				if(pte==0)
					goto not_found;
				if(!iRamCache->ReclaimRamCachePage(SPageInfo::FromPhysAddr(pte)))
					goto not_found;
				}
			while(--pagesInPt);

			if(!aNumPages)
				{
				NKern::UnlockSystem();
				return KErrNone;
				}

			pteIndex = page&(KChunkMask>>KPageShift);
			}
		while(!NKern::FlashSystem() && pteIndex);
		}
not_found:
	NKern::UnlockSystem();
	return KErrNotFound;
	}
|
void RamCache::SetFree(SPageInfo* aPageInfo)
	{
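	// Called when a demand-paged cache page is freed: clear the PTE mapping it
	// in the owning chunk, flush the TLB entry and, if the page table is now
	// empty, detach and free the page table too. The system lock must be
	// dropped around the page table free, so the chunk's page table slot is
	// marked unused (0xffff) beforehand.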
|
	// Make a page free
	TInt type = aPageInfo->Type();
	if(type==SPageInfo::EPagedCache)
		{
		TInt offset = aPageInfo->Offset()<<KPageShift;
		DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
		__NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iSize));
		TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
		TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
		TPte* pt = PtePtrFromLinAddr(lin,asid);
		*pt = 0;
		InvalidateTLBForPage(lin);

		// actually decommit it from chunk...
		TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
		SPageTableInfo& ptinfo=((X86Mmu*)iMmu)->iPtInfo[ptid];
		if(!--ptinfo.iCount)
			{
			chunk->iPageTables[offset>>KChunkShift] = 0xffff;
			NKern::UnlockSystem();
			((X86Mmu*)iMmu)->DoUnassignPageTable(lin, (TAny*)asid);
			((X86Mmu*)iMmu)->FreePageTable(ptid);
			NKern::LockSystem();
			}
		}
	else
		{
		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
		Panic(EUnexpectedPageType);
		}
	}
|
// RAM defragmentation is not supported on X86 yet
void X86Mmu::DisablePageModification(DMemModelChunk* aChunk, TInt aOffset)
	{
	MM::Panic(MM::EOperationNotSupported);
	}

TInt X86Mmu::RamDefragFault(TAny* aExceptionInfo)
	{
	MM::Panic(MM::EOperationNotSupported);
	return KErrAbort;	// not reached; keeps the compiler happy
	}