// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\arm\xmmu.cpp
//
//

#include "arm_mem.h"
#include <mmubase.inl>
#include <ramcache.h>
#include <demand_paging.h>
#include "execs.h"
#include <defrag.h>
#include "cache_maintenance.inl"

#undef __MMU_MACHINE_CODED__
// SECTION_PDE(perm, attr, domain, execute, global)
// PT_PDE(domain)
// LP_PTE(perm, attr, execute, global)
// SP_PTE(perm, attr, execute, global)

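// Page colouring used for VIPT caches: KPageColourShift of 2 gives four page
// colours, so aliases of the same physical page must be mapped at linear
// addresses that match modulo 16K (with 4K pages).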
const TInt KPageColourShift=2;
const TInt KPageColourCount=(1<<KPageColourShift);
const TInt KPageColourMask=KPageColourCount-1;


const TPde KPdPdePerm=PT_PDE(0);
const TPde KPtPdePerm=PT_PDE(0);
const TPde KShadowPdePerm=PT_PDE(0);

#if defined(__CPU_MEMORY_TYPE_REMAPPING)
// ARM1176, ARM11MPCore, ARMv7 and later
// __CPU_MEMORY_TYPE_REMAPPING means that only three bits (TEX0:C:B) in the page table define
// memory attributes. The kernel runs with a limited set of memory types: strongly ordered,
// device, normal uncached and normal WBWA. Due to the lack of a write-through mode, page tables
// are write-back, which means the cache has to be cleaned on every page/directory table update.
const TPte KPdPtePerm=      SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
const TPte KPtPtePerm=      SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
const TPte KPtInfoPtePerm=  SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
const TPte KRomPtePerm=     SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
const TPte KShadowPtePerm=  SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
const TPde KRomSectionPermissions=  SECTION_PDE(KArmV6PermRORO, EMemAttNormalCached, 0, 1, 1);
const TPte KUserCodeLoadPte=    SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 0);
const TPte KUserCodeRunPte=     SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 0);
const TPte KGlobalCodeRunPte=   SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
const TPte KKernelCodeRunPte=   SP_PTE(KArmV6PermRONO, EMemAttNormalCached, 1, 1);

const TInt KNormalUncachedAttr = EMemAttNormalUncached;
const TInt KNormalCachedAttr = EMemAttNormalCached;

#else

// ARM1136
const TPte KPtInfoPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
#if defined (__CPU_WriteThroughDisabled)
const TPte KPdPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
const TPte KPtPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
const TPte KRomPtePerm=SP_PTE(KArmV6PermRORO, KArmV6MemAttWBWAWBWA, 1, 1);
const TPte KShadowPtePerm=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 1);
const TPde KRomSectionPermissions = SECTION_PDE(KArmV6PermRORO, KArmV6MemAttWBWAWBWA, 0, 1, 1);
const TPte KUserCodeLoadPte=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 0);
const TPte KUserCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 0);
const TPte KGlobalCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 1);
const TInt KKernelCodeRunPteAttr = KArmV6MemAttWBWAWBWA;
#else
const TPte KPdPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBRAWTRA, 0, 1);
const TPte KPtPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBRAWTRA, 0, 1);
const TPte KRomPtePerm=SP_PTE(KArmV6PermRORO, KArmV6MemAttWTRAWTRA, 1, 1);
const TPte KShadowPtePerm=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 1);
const TPde KRomSectionPermissions = SECTION_PDE(KArmV6PermRORO, KArmV6MemAttWTRAWTRA, 0, 1, 1);
const TPte KUserCodeLoadPte=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 0);
const TPte KUserCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 0);
const TPte KGlobalCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 1);
const TInt KKernelCodeRunPteAttr = KArmV6MemAttWTRAWTRA;
#endif


#if defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
const TInt KKernelCodeRunPtePerm = KArmV6PermRONO;
#else
const TInt KKernelCodeRunPtePerm = KArmV6PermRORO;
#endif
const TPte KKernelCodeRunPte=SP_PTE(KKernelCodeRunPtePerm, KKernelCodeRunPteAttr, 1, 1);

const TInt KNormalUncachedAttr = KArmV6MemAttNCNC;
const TInt KNormalCachedAttr = KArmV6MemAttWBWAWBWA;

#endif


extern void __FlushBtb();

#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
extern void remove_and_invalidate_page(TPte* aPte, TLinAddr aAddr, TInt aAsid);
extern void remove_and_invalidate_section(TPde* aPde, TLinAddr aAddr, TInt aAsid);
#endif


LOCAL_D const TPte ChunkPtePermissions[ENumChunkTypes] =
    {
#if defined(__CPU_MEMORY_TYPE_REMAPPING)
// ARM1176, ARM11MPCore, ARMv7 and later
    SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1),      // EKernelData
    SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1),      // EKernelStack
    SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 1),      // EKernelCode - loading
    SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 1),      // EDll (used for global code) - loading
    SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 0),      // EUserCode - run
    SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 1),      // ERamDrive
    SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),      // EUserData
    SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),      // EDllData
    SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 1, 0),      // EUserSelfModCode
    SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),      // ESharedKernelSingle
    SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),      // ESharedKernelMultiple
    SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),      // ESharedIo
    SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1),      // ESharedKernelMirror
    SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1),      // EKernelMessage
#else
    SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1),     // EKernelData
    SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1),     // EKernelStack
#if defined (__CPU_WriteThroughDisabled)
    SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 1),     // EKernelCode - loading
    SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 1),     // EDll (used for global code) - loading
    SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 0),     // EUserCode - run
#else
    SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 1),     // EKernelCode - loading
    SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 1),     // EDll (used for global code) - loading
    SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 0),     // EUserCode - run
#endif
    SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 1),     // ERamDrive
    SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),     // EUserData
    SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),     // EDllData
    SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 1, 0),     // EUserSelfModCode
    SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),     // ESharedKernelSingle
    SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),     // ESharedKernelMultiple
    SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),     // ESharedIo
    SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1),     // ESharedKernelMirror
    SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1),     // EKernelMessage
#endif
    };

// The domain for each chunk is selected according to its type.
// The RamDrive lives in a separate domain, to minimise the risk
// of accidental access and corruption. User chunks may also be
// located in a separate domain (15) in DEBUG builds.
LOCAL_D const TPde ChunkPdePermissions[ENumChunkTypes] =
    {
    PT_PDE(0),                      // EKernelData
    PT_PDE(0),                      // EKernelStack
    PT_PDE(0),                      // EKernelCode
    PT_PDE(0),                      // EDll
    PT_PDE(USER_MEMORY_DOMAIN),     // EUserCode
    PT_PDE(1),                      // ERamDrive
    PT_PDE(USER_MEMORY_DOMAIN),     // EUserData
    PT_PDE(USER_MEMORY_DOMAIN),     // EDllData
    PT_PDE(USER_MEMORY_DOMAIN),     // EUserSelfModCode
    PT_PDE(USER_MEMORY_DOMAIN),     // ESharedKernelSingle
    PT_PDE(USER_MEMORY_DOMAIN),     // ESharedKernelMultiple
    PT_PDE(0),                      // ESharedIo
    PT_PDE(0),                      // ESharedKernelMirror
    PT_PDE(0),                      // EKernelMessage
    };

// Inline functions for simple transformations
inline TLinAddr PageTableLinAddr(TInt aId)
    {
    return (KPageTableBase+(aId<<KPageTableShift));
    }

inline TPte* PageTable(TInt aId)
    {
    return (TPte*)(KPageTableBase+(aId<<KPageTableShift));
    }

inline TPte* PageTableEntry(TInt aId, TLinAddr aAddress)
    {
    return PageTable(aId) + ((aAddress >> KPageShift) & (KChunkMask >> KPageShift));
    }
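// Worked example: with 4K pages and 1MB chunks, PageTableEntry(id, 0x00123456)
// returns a pointer to PTE (0x00123456>>KPageShift)&0xFF == 0x23 of page table
// 'id', i.e. the entry for the 0x23rd page within that chunk.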

inline TLinAddr PageDirectoryLinAddr(TInt aOsAsid)
    {
    return (KPageDirectoryBase+(aOsAsid<<KPageDirectoryShift));
    }

inline TPde* PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress)
    {
    return PageDirectory(aOsAsid) + (aAddress >> KChunkShift);
    }

extern void InvalidateTLBForPage(TLinAddr /*aLinAddr*/, TInt /*aAsid*/);
extern void FlushTLBs();
extern TUint32 TTCR();

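// Return the linear address of the page table referenced by page-table-type
// PDE aPde, or NULL if aPde is not a page table PDE or its physical address
// has no SPageInfo (e.g. it is not in kernel-managed RAM).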
TPte* SafePageTableFromPde(TPde aPde)
    {
    if((aPde&KPdeTypeMask)==KArmV6PdePageTable)
        {
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde);
        if(pi)
            {
            TInt id = (pi->Offset()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
            return PageTable(id);
            }
        }
    return 0;
    }

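// Return a pointer to the PTE mapping aAddress in address space aOsAsid, or
// NULL if no page table is mapped there. Addresses beyond the local page
// directory are global, so they are looked up in the kernel's address space
// (ASID 0).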
TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
    {
    if ((TInt)(aAddress>>KChunkShift)>=(TheMmu.iLocalPdSize>>2))
        aOsAsid = 0;
    TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
    TPte* pt = SafePageTableFromPde(pde);
    if(pt)
        pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
    return pt;
    }

#ifndef _DEBUG
// inline in UREL builds...
#ifdef __ARMCC__
__forceinline /* RVCT ignores normal inline qualifier :-( */
#else
inline
#endif
#endif
TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
    {
    // this function only works for process local memory addresses, or for kernel memory (asid==0).
    __NK_ASSERT_DEBUG(aOsAsid==0 || (TInt)(aAddress>>KChunkShift)<(TheMmu.iLocalPdSize>>2));
    TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
    SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
    TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
    TPte* pt = PageTable(id);
    pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
    return pt;
    }


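// Look up the physical page(s) behind the linear region aLinAddr..aLinAddr+aSize.
// Returns KErrNone and sets aPhysicalAddress if the region is physically
// contiguous, a positive value (1) if it is mapped but discontiguous, or
// KErrNotFound if any part of it is unmapped. If aPhysicalPageList is supplied
// it receives one entry per page.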
TInt ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList, TInt aOsAsid)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical %08x+%08x, asid=%d",aLinAddr,aSize,aOsAsid));
    TPhysAddr physStart = ArmMmu::LinearToPhysical(aLinAddr,aOsAsid);
    TPhysAddr nextPhys = physStart&~KPageMask;

    TUint32* pageList = aPhysicalPageList;

    TInt pageIndex = aLinAddr>>KPageShift;
    TInt pagesLeft = ((aLinAddr+aSize-1)>>KPageShift)+1 - pageIndex;
    TInt pdeIndex = aLinAddr>>KChunkShift;
    TPde* pdePtr = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1))
                    ? PageDirectory(aOsAsid)
                    : ::InitPageDirectory;
    pdePtr += pdeIndex;
    while(pagesLeft)
        {
        pageIndex &= KChunkMask>>KPageShift;
        TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
        if(pagesLeftInChunk>pagesLeft)
            pagesLeftInChunk = pagesLeft;
        pagesLeft -= pagesLeftInChunk;

        TPhysAddr phys;
        TPde pde = *pdePtr++;
        TUint pdeType = pde&KPdeTypeMask;
        if(pdeType==KArmV6PdeSection)
            {
            phys = (pde & KPdeSectionAddrMask) + (pageIndex*KPageSize);
            __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Section phys=%8x",phys));
            TInt n=pagesLeftInChunk;
            phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
            if(pageList)
                {
                TUint32* pageEnd = pageList+n;
                do
                    {
                    *pageList++ = phys;
                    phys+=KPageSize;
                    }
                while(pageList<pageEnd);
                }
            }
        else
            {
            TPte* pt = SafePageTableFromPde(pde);
            if(!pt)
                {
                __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical missing page table: PDE=%8x",pde));
                return KErrNotFound;
                }
            pt += pageIndex;
            for(;;)
                {
                TPte pte = *pt++;
                TUint pte_type = pte & KPteTypeMask;
                if (pte_type >= KArmV6PteSmallPage)
                    {
                    phys = (pte & KPteSmallPageAddrMask);
                    __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Small Page phys=%8x",phys));
                    phys==nextPhys ? nextPhys+=KPageSize : nextPhys=KPhysAddrInvalid;
                    if(pageList)
                        *pageList++ = phys;
                    if(--pagesLeftInChunk)
                        continue;
                    break;
                    }
                if (pte_type == KArmV6PteLargePage)
                    {
                    --pt; // back up ptr
                    TUint pageOffset = ((TUint)pt>>2)&(KLargeSmallPageRatio-1);
                    phys = (pte & KPteLargePageAddrMask) + pageOffset*KPageSize;
                    __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Large Page phys=%8x",phys));
                    TInt n=KLargeSmallPageRatio-pageOffset;
                    if(n>pagesLeftInChunk)
                        n = pagesLeftInChunk;
                    phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
                    if(pageList)
                        {
                        TUint32* pageEnd = pageList+n;
                        do
                            {
                            *pageList++ = phys;
                            phys+=KPageSize;
                            }
                        while(pageList<pageEnd);
                        }
                    pt += n;
                    if(pagesLeftInChunk-=n)
                        continue;
                    break;
                    }
                __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical bad PTE %8x",pte));
                return KErrNotFound;
                }
            }
        if(!pageList && nextPhys==KPhysAddrInvalid)
            {
            __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical not contiguous"));
            return KErrNotFound;
            }
        pageIndex = 0;
        }

    if(nextPhys==KPhysAddrInvalid)
        {
        // Memory is discontiguous...
        aPhysicalAddress = KPhysAddrInvalid;
        return 1;
        }
    else
        {
        // Memory is contiguous...
        aPhysicalAddress = physStart;
        return KErrNone;
        }
    }

TInt ArmMmu::PreparePagesForDMA(TLinAddr aLinAddr, TInt aSize, TInt aOsAsid, TPhysAddr* aPhysicalPageList)
//Returns the list of physical pages belonging to the specified memory space.
//Checks that these pages belong to a chunk marked as being trusted.
//Locks these pages so they cannot be moved by e.g. RAM defragmentation.
    {
    SPageInfo* pi = NULL;
    DChunk* chunk = NULL;
    TInt err = KErrNone;

    __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA %08x+%08x, asid=%d",aLinAddr,aSize,aOsAsid));

    TUint32* pageList = aPhysicalPageList;
    TInt pagesInList = 0;   // The number of pages we put in the list so far

    TInt pageIndex = (aLinAddr & KChunkMask) >> KPageShift; // Index of the page within the section
    TInt pagesLeft = ((aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;

    TInt pdeIndex = aLinAddr>>KChunkShift;


    MmuBase::Wait();    // RamAlloc mutex for accessing page/directory tables.
    NKern::LockSystem();// System lock for accessing SPageInfo objects.

    TPde* pdePtr = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid) : ::InitPageDirectory;
    pdePtr += pdeIndex; // This points to the first PDE

    while(pagesLeft)
        {
        TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
        if(pagesLeftInChunk>pagesLeft)
            pagesLeftInChunk = pagesLeft;

        pagesLeft -= pagesLeftInChunk;

        TPte* pt = SafePageTableFromPde(*pdePtr++);
        if(!pt) { err = KErrNotFound; goto fail; }  // Cannot get page table.

        pt += pageIndex;

        for(;pagesLeftInChunk--;)
            {
            TPhysAddr phys = (*pt++ & KPteSmallPageAddrMask);
            pi = SPageInfo::SafeFromPhysAddr(phys);
            if(!pi) { err = KErrNotFound; goto fail; }  // Invalid address

            __KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: PA:%x T:%x S:%x O:%x C:%x",phys, pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
            if (chunk==NULL)
                {// This is the first page. Check the 'trusted' bit.
                if (pi->Type()!= SPageInfo::EChunk)
                    { err = KErrAccessDenied; goto fail; }  // The first page does not belong to a chunk.

                chunk = (DChunk*)pi->Owner();
                if ( (chunk == NULL) || ((chunk->iAttributes & DChunk::ETrustedChunk)== 0) )
                    { err = KErrAccessDenied; goto fail; }  // Not a trusted chunk
                }
            pi->Lock();

            *pageList++ = phys;
            if ( (++pagesInList&127) == 0) // release system lock temporarily on every 512K
                NKern::FlashSystem();
            }
        pageIndex = 0;
        }

    if (pi->Type()!= SPageInfo::EChunk)
        { err = KErrAccessDenied; goto fail; }  // The last page does not belong to a chunk.

    if (chunk && (chunk != (DChunk*)pi->Owner()))
        { err = KErrArgument; goto fail; }  // The first & the last page do not belong to the same chunk.

    NKern::UnlockSystem();
    MmuBase::Signal();
    return KErrNone;

fail:
    __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA failed"));
    NKern::UnlockSystem();
    MmuBase::Signal();
    ReleasePagesFromDMA(aPhysicalPageList, pagesInList);
    return err;
    }

TInt ArmMmu::ReleasePagesFromDMA(TPhysAddr* aPhysicalPageList, TInt aPageCount)
// Unlocks physical pages.
// @param aPhysicalPageList - points to the list of physical pages that should be released.
// @param aPageCount - the number of physical pages in the list.
    {
    NKern::LockSystem();
    __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::ReleasePagesFromDMA count:%d",aPageCount));

    while (aPageCount--)
        {
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(*aPhysicalPageList++);
        if(!pi)
            {
            NKern::UnlockSystem();
            return KErrArgument;
            }
        __KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: T:%x S:%x O:%x C:%x",pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
        pi->Unlock();
        }
    NKern::UnlockSystem();
    return KErrNone;
    }

TPhysAddr ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
//
// Find the physical address corresponding to a given linear address in a specified OS
// address space. Call with system locked.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
    TInt pdeIndex=aLinAddr>>KChunkShift;
    TPde pde = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid)[pdeIndex] : ::InitPageDirectory[pdeIndex];
    TPhysAddr pa=KPhysAddrInvalid;
    if ((pde&KPdePresentMask)==KArmV6PdePageTable)
        {
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
        if (pi)
            {
            TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
            TPte* pPte=PageTable(id);
            TPte pte=pPte[(aLinAddr&KChunkMask)>>KPageShift];
            if (pte & KArmV6PteSmallPage)
                {
                pa=(pte&KPteSmallPageAddrMask)+(aLinAddr&~KPteSmallPageAddrMask);
                __KTRACE_OPT(KMMU,Kern::Printf("Mapped with small page - returning %08x",pa));
                }
            else if ((pte & KArmV6PteTypeMask) == KArmV6PteLargePage)
                {
                pa=(pte&KPteLargePageAddrMask)+(aLinAddr&~KPteLargePageAddrMask);
                __KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa));
                }
            }
        }
    else if ((pde&KPdePresentMask)==KArmV6PdeSection)
        {
        pa=(pde&KPdeSectionAddrMask)|(aLinAddr&~KPdeSectionAddrMask);
        __KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x",pa));
        }
    return pa;
    }

// permission table indexed by XN:APX:AP1:AP0
static const TInt PermissionLookup[16]=
    {                                                    //XN:APX:AP1:AP0
    0,                                                   //0  0   0   0   no access
    EMapAttrWriteSup|EMapAttrReadSup|EMapAttrExecSup,    //0  0   0   1   supRW execute
    EMapAttrWriteSup|EMapAttrReadUser|EMapAttrExecUser,  //0  0   1   0   supRW usrR execute
    EMapAttrWriteUser|EMapAttrReadUser|EMapAttrExecUser, //0  0   1   1   supRW usrRW execute
    0,                                                   //0  1   0   0   reserved
    EMapAttrReadSup|EMapAttrExecSup,                     //0  1   0   1   supR execute
    EMapAttrReadUser|EMapAttrExecUser,                   //0  1   1   0   supR usrR execute
    0,                                                   //0  1   1   1   reserved
    0,                                                   //1  0   0   0   no access
    EMapAttrWriteSup|EMapAttrReadSup,                    //1  0   0   1   supRW
    EMapAttrWriteSup|EMapAttrReadUser,                   //1  0   1   0   supRW usrR
    EMapAttrWriteUser|EMapAttrReadUser,                  //1  0   1   1   supRW usrRW
    0,                                                   //1  1   0   0   reserved
    EMapAttrReadSup,                                     //1  1   0   1   supR
    EMapAttrReadUser,                                    //1  1   1   0   supR usrR
    EMapAttrReadUser,                                    //1  1   1   1   supR usrR
    };

TInt ArmMmu::PageTableId(TLinAddr aAddr, TInt aOsAsid)
    {
    TInt id=-1;
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::PageTableId(%08x,%d)",aAddr,aOsAsid));
    TInt pdeIndex=aAddr>>KChunkShift;
    TPde pde = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid)[pdeIndex] : ::InitPageDirectory[pdeIndex];
    if ((pde&KArmV6PdeTypeMask)==KArmV6PdePageTable)
        {
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
        if (pi)
            id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
        }
    __KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
    return id;
    }

// Used only during boot for recovery of RAM drive
TInt ArmMmu::BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys)
    {
    TInt id=KErrNotFound;
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:BootPageTableId(%08x,&)",aAddr));
    TPde* kpd=(TPde*)KPageDirectoryBase; // kernel page directory
    TInt pdeIndex=aAddr>>KChunkShift;
    TPde pde = kpd[pdeIndex];
    if ((pde & KArmV6PdeTypeMask) == KArmV6PdePageTable)
        {
        aPtPhys = pde & KPdePageTableAddrMask;
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
        if (pi)
            {
            SPageInfo::TType type = pi->Type();
            if (type == SPageInfo::EPageTable)
                id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
            else if (type == SPageInfo::EUnused)
                id = KErrUnknown;
            }
        }
    __KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
    return id;
    }

TBool ArmMmu::PteIsPresent(TPte aPte)
    {
    return aPte & KArmV6PteTypeMask;
    }

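// Return the physical address mapped by PTE aPte at index aPteIndex within its
// page table, or KPhysAddrInvalid if the PTE is empty. A large (64K) page spans
// KLargeSmallPageRatio consecutive identical PTEs, so the low bits of the index
// select the 4K sub-page within the large page.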
TPhysAddr ArmMmu::PtePhysAddr(TPte aPte, TInt aPteIndex)
    {
    TUint32 pte_type = aPte & KArmV6PteTypeMask;
    if (pte_type == KArmV6PteLargePage)
        return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask);
    else if (pte_type != 0)
        return aPte & KPteSmallPageAddrMask;
    return KPhysAddrInvalid;
    }

TPhysAddr ArmMmu::PdePhysAddr(TLinAddr aAddr)
    {
    TPde* kpd = (TPde*)KPageDirectoryBase; // kernel page directory
    TPde pde = kpd[aAddr>>KChunkShift];
    if ((pde & KPdePresentMask) == KArmV6PdeSection)
        return pde & KPdeSectionAddrMask;
    return KPhysAddrInvalid;
    }

void ArmMmu::Init1()
    {
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::Init1"));

    // MmuBase data
    iPageSize=KPageSize;
    iPageMask=KPageMask;
    iPageShift=KPageShift;
    iChunkSize=KChunkSize;
    iChunkMask=KChunkMask;
    iChunkShift=KChunkShift;
    iPageTableSize=KPageTableSize;
    iPageTableMask=KPageTableMask;
    iPageTableShift=KPageTableShift;
    iPtClusterSize=KPtClusterSize;
    iPtClusterMask=KPtClusterMask;
    iPtClusterShift=KPtClusterShift;
    iPtBlockSize=KPtBlockSize;
    iPtBlockMask=KPtBlockMask;
    iPtBlockShift=KPtBlockShift;
    iPtGroupSize=KChunkSize/KPageTableSize;
    iPtGroupMask=iPtGroupSize-1;
    iPtGroupShift=iChunkShift-iPageTableShift;
    //TInt* iPtBlockCount;      // dynamically allocated - Init2
    //TInt* iPtGroupCount;      // dynamically allocated - Init2
    iPtInfo=(SPageTableInfo*)KPageTableInfoBase;
    iPageTableLinBase=KPageTableBase;
    //iRamPageAllocator;        // dynamically allocated - Init2
    //iAsyncFreeList;           // dynamically allocated - Init2
    //iPageTableAllocator;      // dynamically allocated - Init2
    //iPageTableLinearAllocator;// dynamically allocated - Init2
    iPtInfoPtePerm=KPtInfoPtePerm;
    iPtPtePerm=KPtPtePerm;
    iPtPdePerm=KPtPdePerm;
    iUserCodeLoadPtePerm=KUserCodeLoadPte;
    iKernelCodePtePerm=KKernelCodeRunPte;
    iTempAddr=KTempAddr;
    iSecondTempAddr=KSecondTempAddr;
    iMapSizes=KPageSize|KLargePageSize|KChunkSize;
    iRomLinearBase = ::RomHeaderAddress;
    iRomLinearEnd = KRomLinearEnd;
    iShadowPtePerm = KShadowPtePerm;
    iShadowPdePerm = KShadowPdePerm;

    // Mmu data
    TInt total_ram=TheSuperPage().iTotalRamSize;

    // Large or small configuration?
    // This is determined by the bootstrap based on RAM size
    TUint32 ttcr=TTCR();
    __NK_ASSERT_ALWAYS(ttcr==1 || ttcr==2);
    TBool large = (ttcr==1);
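    // TTCR==1 selects the large configuration: local (per-process) addresses
    // occupy the bottom 2GB, using 8K local page directories. TTCR==2 selects
    // the small configuration: local addresses occupy the bottom 1GB, using
    // 4K local page directories. See the 'large' branch below.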

    // calculate cache colouring...
    TInt iColourCount = 0;
    TInt dColourCount = 0;
    TUint32 ctr = InternalCache::TypeRegister();
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheTypeRegister = %08x",ctr));
#ifdef __CPU_ARMV6
    __NK_ASSERT_ALWAYS((ctr>>29)==0);   // check ARMv6 format
    if(ctr&0x800)
        iColourCount = 4;
    if(ctr&0x800000)
        dColourCount = 4;
#else
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheTypeRegister = %08x",ctr));
    __NK_ASSERT_ALWAYS((ctr>>29)==4);   // check ARMv7 format
    TUint l1ip = (ctr>>14)&3;           // L1 instruction cache indexing and tagging policy
    __NK_ASSERT_ALWAYS(l1ip>=2);        // check I cache is physically tagged

    TUint32 clidr = InternalCache::LevelIDRegister();
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheLevelIDRegister = %08x",clidr));
    TUint l1type = clidr&7;
    if(l1type)
        {
        if(l1type==2 || l1type==3 || l1type==4)
            {
            // we have an L1 data cache...
            TUint32 csir = InternalCache::SizeIdRegister(0,0);
            TUint sets = ((csir>>13)&0x7fff)+1;
            TUint ways = ((csir>>3)&0x3ff)+1;
            TUint lineSizeShift = (csir&7)+4;
            // assume L1 data cache is VIPT and alias checks broken and so we need data cache colouring...
            dColourCount = (sets<<lineSizeShift)>>KPageShift;
            if(l1type==4) // unified cache, so set instruction cache colour as well...
                iColourCount = (sets<<lineSizeShift)>>KPageShift;
            __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1DCache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
            }

        if(l1type==1 || l1type==3)
            {
            // we have a separate L1 instruction cache...
            TUint32 csir = InternalCache::SizeIdRegister(1,0);
            TUint sets = ((csir>>13)&0x7fff)+1;
            TUint ways = ((csir>>3)&0x3ff)+1;
            TUint lineSizeShift = (csir&7)+4;
            iColourCount = (sets<<lineSizeShift)>>KPageShift;
            __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
            }
        }
    if(l1ip==3)
        {
        // PIPT cache, so no colouring restrictions...
        __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache is PIPT"));
        iColourCount = 0;
        }
    else
        {
        // VIPT cache...
        __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache is VIPT"));
        }
#endif
    TUint colourShift = 0;
    for(TUint colourCount=Max(iColourCount,dColourCount); colourCount!=0; colourCount>>=1)
        ++colourShift;
    iAliasSize=KPageSize<<colourShift;
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iAliasSize=0x%x",iAliasSize));
    iAliasMask=iAliasSize-1;
    iAliasShift=KPageShift+colourShift;

    iDecommitThreshold = CacheMaintenance::SyncAllPerformanceThresholdPages();

    iNumOsAsids=KArmV6NumAsids;
    iNumGlobalPageDirs=1;
    //iOsAsidAllocator;         // dynamically allocated - Init2
    iGlobalPdSize=KPageDirectorySize;
    iGlobalPdShift=KPageDirectoryShift;
    iAsidGroupSize=KChunkSize/KPageDirectorySize;
    iAsidGroupMask=iAsidGroupSize-1;
    iAsidGroupShift=KChunkShift-KPageDirectoryShift;
    iUserLocalBase=KUserLocalDataBase;
    iAsidInfo=(TUint32*)KAsidInfoBase;
    iPdeBase=KPageDirectoryBase;
    iPdPtePerm=KPdPtePerm;
    iPdPdePerm=KPdPdePerm;
    iRamDriveMask=0x00f00000;
    iGlobalCodePtePerm=KGlobalCodeRunPte;
#if defined(__CPU_MEMORY_TYPE_REMAPPING)
    iCacheMaintenanceTempMapAttr = CacheMaintenance::TemporaryMapping();
#else
    switch(CacheMaintenance::TemporaryMapping())
        {
        case EMemAttNormalUncached:
            iCacheMaintenanceTempMapAttr = KArmV6MemAttNCNC;
            break;
        case EMemAttNormalCached:
            iCacheMaintenanceTempMapAttr = KArmV6MemAttWBWAWBWA;
            break;
        default:
            Panic(ETempMappingFailed);
        }
#endif
    iMaxDllDataSize=Min(total_ram/2, 0x08000000);               // phys RAM/2 up to 128Mb
    iMaxDllDataSize=(iMaxDllDataSize+iChunkMask)&~iChunkMask;   // round up to chunk size
    iMaxUserCodeSize=Min(total_ram, 0x10000000);                // phys RAM up to 256Mb
    iMaxUserCodeSize=(iMaxUserCodeSize+iChunkMask)&~iChunkMask; // round up to chunk size
    if (large)
        {
        iLocalPdSize=KPageDirectorySize/2;
        iLocalPdShift=KPageDirectoryShift-1;
        iUserSharedBase=KUserSharedDataBase2GB;
        iUserLocalEnd=iUserSharedBase-iMaxDllDataSize;
        iUserSharedEnd=KUserSharedDataEnd2GB-iMaxUserCodeSize;
        iDllDataBase=iUserLocalEnd;
        iUserCodeBase=iUserSharedEnd;
        }
    else
        {
        iLocalPdSize=KPageDirectorySize/4;
        iLocalPdShift=KPageDirectoryShift-2;
        iUserSharedBase=KUserSharedDataBase1GB;
        iUserLocalEnd=iUserSharedBase;
        iDllDataBase=KUserSharedDataEnd1GB-iMaxDllDataSize;
        iUserCodeBase=iDllDataBase-iMaxUserCodeSize;
        iUserSharedEnd=iUserCodeBase;
        }
    __KTRACE_OPT(KMMU,Kern::Printf("LPD size %08x GPD size %08x Alias size %08x",
            iLocalPdSize, iGlobalPdSize, iAliasSize));
    __KTRACE_OPT(KMMU,Kern::Printf("ULB %08x ULE %08x USB %08x USE %08x",iUserLocalBase,iUserLocalEnd,
            iUserSharedBase,iUserSharedEnd));
    __KTRACE_OPT(KMMU,Kern::Printf("DDB %08x UCB %08x",iDllDataBase,iUserCodeBase));

    // ArmMmu data

    // other
    PP::MaxUserThreadStack=0x14000;         // 80K - STDLIB asks for 64K for PosixServer!!!!
    PP::UserThreadStackGuard=0x2000;        // 8K
    PP::MaxStackSpacePerProcess=0x200000;   // 2Mb
    K::SupervisorThreadStackSize=0x1000;    // 4K
    PP::SupervisorThreadStackGuard=0x1000;  // 4K
    K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
    PP::RamDriveStartAddress=KRamDriveStartAddress;
    PP::RamDriveRange=KRamDriveMaxSize;
    PP::RamDriveMaxSize=KRamDriveMaxSize;   // may be reduced later
    K::MemModelAttributes=EMemModelTypeMultiple|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
            EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt|
            EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt;

    Arm::DefaultDomainAccess=KDefaultDomainAccess;

    Mmu::Init1();
    }

void ArmMmu::DoInit2()
    {
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::DoInit2"));
    iTempPte=PageTable(PageTableId(iTempAddr,0))+((iTempAddr&KChunkMask)>>KPageShift);
    iSecondTempPte=PageTable(PageTableId(iSecondTempAddr,0))+((iSecondTempAddr&KChunkMask)>>KPageShift);
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iTempAddr=%08x, iTempPte=%08x, iSecondTempAddr=%08x, iSecondTempPte=%08x",
            iTempAddr, iTempPte, iSecondTempAddr, iSecondTempPte));
    CreateKernelSection(KKernelSectionEnd, iAliasShift);
    CreateUserGlobalSection(KUserGlobalDataBase, KUserGlobalDataEnd);
    Mmu::DoInit2();
    }

#ifndef __MMU_MACHINE_CODED__
void ArmMmu::MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm)
//
// Map a list of physical RAM pages into a specified page table with specified PTE permissions.
// Update the page information array.
// Call this with the system locked.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapRamPages() id=%d type=%d ptr=%08x off=%08x n=%d perm=%08x",
            aId, aType, aPtr, aOffset, aNumPages, aPtePerm));

    SPageTableInfo& ptinfo=iPtInfo[aId];
    ptinfo.iCount+=aNumPages;
    aOffset>>=KPageShift;
    TInt ptOffset=aOffset & KPagesInPDEMask;    // entry number in page table
    TPte* pPte=PageTable(aId)+ptOffset;         // address of first PTE

    TLinAddr firstPte = (TLinAddr)pPte; // Will need this to clean page table changes in cache.

    while(aNumPages--)
        {
        TPhysAddr pa = *aPageList++;
        if(pa==KPhysAddrInvalid)
            {
            ++pPte;
            __NK_ASSERT_DEBUG(aType==SPageInfo::EInvalid);
            continue;
            }
        *pPte++ = pa | aPtePerm;    // insert PTE
        __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
        if (aType!=SPageInfo::EInvalid)
            {
            SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
            if(pi)
                {
                pi->Set(aType,aPtr,aOffset);
                __KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
                ++aOffset;  // increment offset for next page
                }
            }
        }
    CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)pPte-firstPte);
    }

void ArmMmu::MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm)
//
// Map consecutive physical pages into a specified page table with specified PTE permissions.
// Update the page information array if RAM pages are being mapped.
// Call this with the system locked.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapPhysicalPages() id=%d type=%d ptr=%08x off=%08x phys=%08x n=%d perm=%08x",
            aId, aType, aPtr, aOffset, aPhysAddr, aNumPages, aPtePerm));
    SPageTableInfo& ptinfo=iPtInfo[aId];
    ptinfo.iCount+=aNumPages;
    aOffset>>=KPageShift;
    TInt ptOffset=aOffset & KPagesInPDEMask;                // entry number in page table
    TPte* pPte=(TPte*)(PageTableLinAddr(aId))+ptOffset;     // address of first PTE

    TLinAddr firstPte = (TLinAddr)pPte; // Will need this to clean page table changes in cache

    SPageInfo* pi;
    if(aType==SPageInfo::EInvalid)
        pi = NULL;
    else
        pi = SPageInfo::SafeFromPhysAddr(aPhysAddr);
    while(aNumPages--)
        {
        *pPte++ = aPhysAddr|aPtePerm;   // insert PTE
        aPhysAddr+=KPageSize;
        __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
        if (pi)
            {
            pi->Set(aType,aPtr,aOffset);
            __KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
            ++aOffset;  // increment offset for next page
            ++pi;
            }
        }

    CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)pPte-firstPte);
    }

void ArmMmu::MapVirtual(TInt aId, TInt aNumPages)
//
// Called in place of MapRamPages or MapPhysicalPages to update mmu data structures when committing
// virtual address space to a chunk. No pages are mapped.
// Call this with the system locked.
//
    {
    SPageTableInfo& ptinfo=iPtInfo[aId];
    ptinfo.iCount+=aNumPages;
    }

void ArmMmu::RemapPage(TInt aId, TUint32 aAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm, DProcess* aProcess)
//
// Replace the mapping at address aAddr in page table aId.
// Update the page information array for both the old and new pages.
// Return physical address of old page if it is now ready to be freed.
// Call this with the system locked.
// May be called with interrupts disabled, do not enable/disable them.
//
    {
    TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;   // entry number in page table
    TPte* pPte=PageTable(aId)+ptOffset;             // address of PTE
    TPte pte=*pPte;
    TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
            (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );

    if (pte & KArmV6PteSmallPage)
        {
        __ASSERT_ALWAYS((pte & KPteSmallPageAddrMask) == aOldAddr, Panic(ERemapPageFailed));
        SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
        __ASSERT_DEBUG(oldpi->LockCount()==0,Panic(ERemapPageFailed));

        // remap page
        *pPte = aNewAddr | aPtePerm;    // overwrite PTE
        CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
        InvalidateTLBForPage(aAddr,asid);   // flush TLB entry

        // update new pageinfo, clear old
        SPageInfo* pi = SPageInfo::FromPhysAddr(aNewAddr);
        pi->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
        oldpi->SetUnused();
        }
    else
        {
        Panic(ERemapPageFailed);
        }
    }

void ArmMmu::RemapPageByAsid(TBitMapAllocator* aOsAsids, TLinAddr aLinAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm)
//
// Replace the mapping at address aLinAddr in the relevant page table for all
// ASIDs specified in aOsAsids, but only if the currently mapped address is
// aOldAddr.
// Update the page information array for both the old and new pages.
// Call this with the system unlocked.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageByAsid() linaddr=%08x oldaddr=%08x newaddr=%08x perm=%08x", aLinAddr, aOldAddr, aNewAddr, aPtePerm));

    TInt asid = -1;
    TInt lastAsid = KArmV6NumAsids - 1;
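    // Scan the ASID bitmap one 32-bit word at a time, most significant bit
    // first: bit 31 of each word corresponds to the lowest-numbered ASID in
    // that word. Once a word is exhausted, 'asid |= 31' advances to the last
    // ASID of the word so the outer loop moves on to the next one.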
    TUint32* ptr = aOsAsids->iMap;
    NKern::LockSystem();
    do
        {
        TUint32 bits = *ptr++;
        do
            {
            ++asid;
            if(bits & 0x80000000u)
                {
                // mapped in this address space, so update PTE...
                TPte* pPte = PtePtrFromLinAddr(aLinAddr, asid);
                TPte pte = *pPte;
                if ((pte&~KPageMask) == aOldAddr)
                    {
                    *pPte = aNewAddr | aPtePerm;
                    __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x in asid %d",*pPte,pPte,asid));
                    CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
                    InvalidateTLBForPage(aLinAddr,asid);    // flush TLB entry
                    }
                }
            }
        while(bits<<=1);
        NKern::FlashSystem();
        asid |= 31;
        }
    while(asid<lastAsid);

    // copy pageinfo attributes and mark old page unused
    SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
    SPageInfo::FromPhysAddr(aNewAddr)->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
    oldpi->SetUnused();

    NKern::UnlockSystem();
    }

TInt ArmMmu::UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
//
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
// On the multiple memory model, do not call this method with aSetPagesFree false. Call UnmapUnownedPages instead.
    {
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapPages() id=%d addr=%08x n=%d pl=%08x set-free=%d",aId,aAddr,aNumPages,aPageList,aSetPagesFree));
    TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;   // entry number in page table
    TPte* pPte=PageTable(aId)+ptOffset;             // address of first PTE
    TInt np=0;
    TInt nf=0;
    TUint32 ng=0;
    TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
            (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );


    while(aNumPages--)
        {
        TPte pte=*pPte;     // get original PTE
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
        remove_and_invalidate_page(pPte, aAddr, asid);
        ++pPte;
#else
        *pPte++=0;          // clear PTE
#endif

        // We count all unmapped pages in np, including demand paged 'old' pages - but we don't pass
        // these to PageUnmapped, as the page doesn't become free until it's unmapped from all
        // processes
        if (pte != KPteNotPresentEntry)
            ++np;

        if (pte & KArmV6PteSmallPage)
            {
            ng |= pte;
#if !defined(__CPU_ARM1136__) || defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
            // When the erratum 353494 workaround is in use, remove_and_invalidate_page
            // has already sorted out cache and TLB; otherwise we have to do it here.
            CacheMaintenance::SinglePteUpdated((TLinAddr)(pPte-1));
            if (asid >= 0) //otherwise, KUnmapPagesTLBFlushDeferred will be returned.
                InvalidateTLBForPage(aAddr,asid);   // flush any corresponding TLB entry
#endif
            TPhysAddr pa=pte & KPteSmallPageAddrMask;   // physical address of unmapped page
            if (aSetPagesFree)
                {
                SPageInfo* pi = SPageInfo::FromPhysAddr(pa);
                if(iRamCache->PageUnmapped(pi))
                    {
                    pi->SetUnused();            // mark page as unused
                    if (pi->LockCount()==0)
                        {
                        *aPageList++=pa;        // store in page list
                        ++nf;                   // count free pages
                        }
                    }
                }
            else
                *aPageList++=pa;                // store in page list
            }
        aAddr+=KPageSize;
        }

    aNumPtes=np;
    aNumFree=nf;
    SPageTableInfo& ptinfo=iPtInfo[aId];
    TInt r=(ptinfo.iCount-=np);
    if (asid<0)
        r|=KUnmapPagesTLBFlushDeferred;


#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
    __FlushBtb();
#endif

    __KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
    return r;   // return number of pages remaining in this page table
    }

TInt ArmMmu::UnmapVirtual(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
//
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Adjust the page table reference count as if aNumPages pages were unmapped.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
// On the multiple memory model, do not call this method with aSetPagesFree false. Call UnmapUnownedVirtual instead.
//
    {
    SPageTableInfo& ptinfo=iPtInfo[aId];
    TInt newCount = ptinfo.iCount - aNumPages;
    UnmapPages(aId, aAddr, aNumPages, aPageList, aSetPagesFree, aNumPtes, aNumFree, aProcess);
    ptinfo.iCount = newCount;
    aNumPtes = aNumPages;
    return newCount;
    }

TInt ArmMmu::UnmapUnownedPages(TInt aId, TUint32 aAddr, TInt aNumPages,
        TPhysAddr* aPageList, TLinAddr* aLAPageList,TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
/*
 * Unmaps the specified area at address aAddr in page table aId.
 * Places physical addresses of non-demand-paged unmapped pages into aPageList.
 * Corresponding linear addresses are placed into aLAPageList.
 * 'Old' demand-paged pages (holding an invalid PTE with a physical address) are neither unmapped nor
 * included in aPageList, but are still counted in aNumPtes.
 *
 * This method should be called to decommit physical memory not owned by the chunk. As we do not know
 * the origin of such memory, PtInfo could be invalid (or doesn't exist) so cache maintenance may not be
 * able to obtain the mapping colour. For that reason, this also returns the former linear address of
 * each page in aPageList.
 *
 * @pre All pages are mapped within a single page table identified by aId.
 * @pre On entry, the system lock is held and is not released during the execution.
 *
 * @arg aId         Id of the page table that maps the pages.
 * @arg aAddr       Linear address of the start of the area.
 * @arg aNumPages   The number of pages to unmap.
 * @arg aProcess    The owning process of the memory area to unmap.
 * @arg aPageList   On exit, holds the list of unmapped pages.
 * @arg aLAPageList On exit, holds the list of linear addresses of unmapped pages.
 * @arg aNumFree    On exit, holds the number of pages in aPageList.
 * @arg aNumPtes    On exit, holds the number of unmapped pages. This includes demand-paged 'old'
 *                  pages (with the invalid page table entry still holding the address of the physical page.)
 *
 * @return          The number of pages still mapped using this page table. It is ORed with
 *                  KUnmapPagesTLBFlushDeferred if the TLB flush is not executed - which requires
 *                  the caller to do a global TLB flush.
 */
    {
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapUnownedPages() id=%d addr=%08x n=%d pl=%08x",aId,aAddr,aNumPages,aPageList));
    TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;   // entry number in page table
    TPte* pPte=PageTable(aId)+ptOffset;             // address of first PTE
    TInt np=0;
    TInt nf=0;
    TUint32 ng=0;
    TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
            (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );

    while(aNumPages--)
        {
        TPte pte=*pPte;     // get original PTE
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
        remove_and_invalidate_page(pPte, aAddr, asid);
        ++pPte;
#else
        *pPte++=0;          // clear PTE
#endif

        // We count all unmapped pages in np, including demand paged 'old' pages - but we don't pass
        // these to PageUnmapped, as the page doesn't become free until it's unmapped from all
        // processes
        if (pte != KPteNotPresentEntry)
            ++np;

        if (pte & KArmV6PteSmallPage)
            {
            ng |= pte;
#if !defined(__CPU_ARM1136__) || defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
            // When the erratum 353494 workaround is in use, remove_and_invalidate_page
            // has already sorted out cache and TLB; otherwise we have to do it here.
            CacheMaintenance::SinglePteUpdated((TLinAddr)(pPte-1));
            if (asid >= 0) //otherwise, KUnmapPagesTLBFlushDeferred will be returned.
                InvalidateTLBForPage(aAddr,asid);   // flush any corresponding TLB entry
#endif
            TPhysAddr pa=pte & KPteSmallPageAddrMask;   // physical address of unmapped page
            ++nf;
            *aPageList++=pa;        // store physical address in page list
            *aLAPageList++=aAddr;   // store linear address in page list
            }
        aAddr+=KPageSize;
        }

    aNumPtes=np;
    aNumFree=nf;
    SPageTableInfo& ptinfo=iPtInfo[aId];
    TInt r=(ptinfo.iCount-=np);
    if (asid<0)
        r|=KUnmapPagesTLBFlushDeferred;


#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
    __FlushBtb();
#endif

    __KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
    return r;   // return number of pages remaining in this page table
    }


TInt ArmMmu::UnmapUnownedVirtual(TInt aId, TUint32 aAddr, TInt aNumPages,
        TPhysAddr* aPageList, TLinAddr* aLAPageList,TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
//
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Adjust the page table reference count as if aNumPages pages were unmapped.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
//
    {
    SPageTableInfo& ptinfo=iPtInfo[aId];
    TInt newCount = ptinfo.iCount - aNumPages;
    UnmapUnownedPages(aId, aAddr, aNumPages, aPageList, aLAPageList, aNumPtes, aNumFree, aProcess);
    ptinfo.iCount = newCount;
    aNumPtes = aNumPages;
    return newCount;
    }

void ArmMmu::DoAssignPageTable(TInt aId, TLinAddr aAddr, TPde aPdePerm, const TAny* aOsAsids)
//
// Assign an allocated page table to map a given linear address with specified permissions.
// This should be called with the system unlocked and the MMU mutex held.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoAssignPageTable %d to %08x perm %08x asid %08x",aId,aAddr,aPdePerm,aOsAsids));
    TLinAddr ptLin=PageTableLinAddr(aId);
    TPhysAddr ptPhys=LinearToPhysical(ptLin,0);
    TInt pdeIndex=TInt(aAddr>>KChunkShift);
    TBool gpd=(pdeIndex>=(iLocalPdSize>>2));
    TInt os_asid=(TInt)aOsAsids;
    if (TUint32(os_asid)<TUint32(iNumOsAsids))
        {
        // single OS ASID
        TPde* pageDir=PageDirectory(os_asid);
        NKern::LockSystem();
        pageDir[pdeIndex]=ptPhys|aPdePerm;  // will blow up here if address is in global region and aOsAsid doesn't have a global PD
        CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
        NKern::UnlockSystem();

        __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
        }
    else if (os_asid==-1 && gpd)
        {
        // all OS ASIDs, address in global region
        TInt num_os_asids=iNumGlobalPageDirs;
        const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
        for (os_asid=0; num_os_asids; ++os_asid)
            {
            if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
                {
                // this OS ASID exists and has a global page directory
                TPde* pageDir=PageDirectory(os_asid);
                NKern::LockSystem();
                pageDir[pdeIndex]=ptPhys|aPdePerm;
                CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
                NKern::UnlockSystem();

                __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
                --num_os_asids;
                }
            }
        }
    else
        {
        // selection of OS ASIDs or all OS ASIDs
        const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
        if (os_asid==-1)
            pB=iOsAsidAllocator;    // 0's in positions which exist
        TInt num_os_asids=pB->iSize-pB->iAvail;
        for (os_asid=0; num_os_asids; ++os_asid)
            {
            if (pB->NotAllocated(os_asid,1))
                continue;           // os_asid is not needed
            TPde* pageDir=PageDirectory(os_asid);
            NKern::LockSystem();
            pageDir[pdeIndex]=ptPhys|aPdePerm;
            CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
            NKern::UnlockSystem();

            __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
            --num_os_asids;
            }
        }
    }

void ArmMmu::RemapPageTableSingle(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, TInt aOsAsid)
//
// Replace a single page table mapping the specified linear address.
// This should be called with the system locked and the MMU mutex held.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableSingle %08x to %08x at %08x asid %d",aOld,aNew,aAddr,aOsAsid));
    TPde* pageDir=PageDirectory(aOsAsid);
    TInt pdeIndex=TInt(aAddr>>KChunkShift);
    TPde pde=pageDir[pdeIndex];
    __ASSERT_ALWAYS((pde & KPdePageTableAddrMask) == aOld, Panic(ERemapPageTableFailed));
    TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
    pageDir[pdeIndex]=newPde;   // will blow up here if address is in global region and aOsAsid doesn't have a global PD
    CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));

    __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
    }

void ArmMmu::RemapPageTableGlobal(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr)
//
// Replace a global page table mapping the specified linear address.
// This should be called with the system locked and the MMU mutex held.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableGlobal %08x to %08x at %08x",aOld,aNew,aAddr));
    TInt pdeIndex=TInt(aAddr>>KChunkShift);
    TInt num_os_asids=iNumGlobalPageDirs;
    const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
    for (TInt os_asid=0; num_os_asids; ++os_asid)
        {
        if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
            {
            // this OS ASID exists and has a global page directory
            TPde* pageDir=PageDirectory(os_asid);
            TPde pde=pageDir[pdeIndex];
            if ((pde & KPdePageTableAddrMask) == aOld)
                {
                TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
                pageDir[pdeIndex]=newPde;
                CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));

                __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
                }
            --num_os_asids;
            }
        if ((os_asid&31)==31)
            NKern::FlashSystem();
        }
    }
|
1316 |
|
1317 void ArmMmu::RemapPageTableMultiple(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, const TAny* aOsAsids) |
|
1318 // |
|
1319 // Replace multiple page table mappings of the specified linear address. |
|
1320 // This should be called with the system locked and the MMU mutex held. |
|
1321 // |
|
1322 { |
|
1323 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableMultiple %08x to %08x at %08x asids %08x",aOld,aNew,aAddr,aOsAsids)); |
|
1324 TInt pdeIndex=TInt(aAddr>>KChunkShift); |
|
1325 const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids; |
|
1326 if ((TInt)aOsAsids==-1) |
|
1327 pB=iOsAsidAllocator; // 0's in positions which exist |
|
1328 |
|
1329 TInt asid = -1; |
|
1330 TInt lastAsid = KArmV6NumAsids - 1; |
|
1331 const TUint32* ptr = pB->iMap; |
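
// Walk the ASID bitmap a 32-bit word at a time, most significant bit first. |

// Each word is inverted below so that a set bit marks an ASID which exists; |

// the inner loop can then stop as soon as no interesting ASIDs remain in the |

// current word, and asid|=31 skips straight to the next word boundary. |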
|
1332 do |
|
1333 { |
|
1334 TUint32 bits = ~*ptr++; // invert the word: a set bit now means the ASID exists |
|
1335 do |
|
1336 { |
|
1337 ++asid; |
|
1338 if (bits & 0x80000000u) |
|
1339 { |
|
1340 // mapped in this address space - bitmap is inverted |
|
1341 TPde* pageDir=PageDirectory(asid); |
|
1342 TPde pde=pageDir[pdeIndex]; |
|
1343 if ((pde & KPdePageTableAddrMask) == aOld) |
|
1344 { |
|
1345 TPde newPde=aNew|(pde&~KPdePageTableAddrMask); |
|
1346 pageDir[pdeIndex]=newPde; |
|
1347 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex)); |
|
1348 |
|
1349 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex)); |
|
1350 } |
|
1351 } |
|
1352 } |
|
1353 while(bits<<=1); |
|
1354 NKern::FlashSystem(); |
|
1355 asid |= 31; |
|
1356 } |
|
1357 while(asid<lastAsid); |
|
1358 } |
|
1359 |
|
1360 void ArmMmu::RemapPageTableAliases(TPhysAddr aOld, TPhysAddr aNew) |
|
1361 // |
|
1362 // Replace aliases of the specified page table. |
|
1363 // This should be called with the system locked and the MMU mutex held. |
|
1364 // |
|
1365 { |
|
1366 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableAliases %08x to %08x",aOld,aNew)); |
|
1367 SDblQue checkedList; |
|
1368 SDblQueLink* next; |
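
// Links are dequeued onto checkedList as they are examined: FlashSystem() |

// below may release the system lock, so iAliasList can change under us and |

// this keeps the walk restartable. The list is moved back when the walk ends. |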
|
1369 |
|
1370 while(!iAliasList.IsEmpty()) |
|
1371 { |
|
1372 next = iAliasList.First()->Deque(); |
|
1373 checkedList.Add(next); |
|
1374 DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink)); |
|
1375 TPde pde = thread->iAliasPde; |
|
1376 if ((pde & ~KPageMask) == aOld) |
|
1377 { |
|
1378 // a page table in this page is being aliased by the thread, so update it... |
|
1379 thread->iAliasPde = (pde & KPageMask) | aNew; |
|
1380 } |
|
1381 NKern::FlashSystem(); |
|
1382 } |
|
1383 |
|
1384 // copy checkedList back to iAliasList |
|
1385 iAliasList.MoveFrom(&checkedList); |
|
1386 } |
|
1387 |
|
1388 void ArmMmu::DoUnassignPageTable(TLinAddr aAddr, const TAny* aOsAsids) |
|
1389 // |
|
1390 // Unassign a now-empty page table currently mapping the specified linear address. |
|
1391 // We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped. |
|
1392 // This should be called with the system unlocked and the MMU mutex held. |
|
1393 // |
|
1394 { |
|
1395 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoUnassignPageTable at %08x a=%08x",aAddr,aOsAsids)); |
|
1396 TInt pdeIndex=TInt(aAddr>>KChunkShift); |
|
1397 TBool gpd=(pdeIndex>=(iLocalPdSize>>2)); |
|
1398 TInt os_asid=(TInt)aOsAsids; |
|
1399 TUint pde=0; |
|
1400 |
|
1401 SDblQue checkedList; |
|
1402 SDblQueLink* next; |
|
1403 |
|
1404 if (TUint32(os_asid)<TUint32(iNumOsAsids)) |
|
1405 { |
|
1406 // single OS ASID |
|
1407 TPde* pageDir=PageDirectory(os_asid); |
|
1408 NKern::LockSystem(); |
|
1409 pde = pageDir[pdeIndex]; |
|
1410 pageDir[pdeIndex]=0; |
|
1411 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex)); |
|
1412 __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex)); |
|
1413 |
|
1414 // remove any aliases of the page table... |
|
1415 TUint ptId = pde>>KPageTableShift; |
|
1416 while(!iAliasList.IsEmpty()) |
|
1417 { |
|
1418 next = iAliasList.First()->Deque(); |
|
1419 checkedList.Add(next); |
|
1420 DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink)); |
|
1421 if(thread->iAliasOsAsid==os_asid && (thread->iAliasPde>>KPageTableShift)==ptId) |
|
1422 { |
|
1423 // the page table is being aliased by the thread, so remove it... |
|
1424 thread->iAliasPde = 0; |
|
1425 } |
|
1426 NKern::FlashSystem(); |
|
1427 } |
|
1428 } |
|
1429 else if (os_asid==-1 && gpd) |
|
1430 { |
|
1431 // all OS ASIDs, address in global region |
|
1432 TInt num_os_asids=iNumGlobalPageDirs; |
|
1433 const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator; |
|
1434 for (os_asid=0; num_os_asids; ++os_asid) |
|
1435 { |
|
1436 if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1)) |
|
1437 { |
|
1438 // this OS ASID exists and has a global page directory |
|
1439 TPde* pageDir=PageDirectory(os_asid); |
|
1440 NKern::LockSystem(); |
|
1441 pageDir[pdeIndex]=0; |
|
1442 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex)); |
|
1443 NKern::UnlockSystem(); |
|
1444 |
|
1445 __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex)); |
|
1446 --num_os_asids; |
|
1447 } |
|
1448 } |
|
1449 // we don't need to look for aliases in this case, because these aren't |
|
1450 // created for page tables in the global region. |
|
1451 NKern::LockSystem(); |
|
1452 } |
|
1453 else |
|
1454 { |
|
1455 // selection of OS ASIDs or all OS ASIDs |
|
1456 const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids; |
|
1457 if (os_asid==-1) |
|
1458 pB=iOsAsidAllocator; // 0's in positions which exist |
|
1459 TInt num_os_asids=pB->iSize-pB->iAvail; |
|
1460 for (os_asid=0; num_os_asids; ++os_asid) |
|
1461 { |
|
1462 if (pB->NotAllocated(os_asid,1)) |
|
1463 continue; // os_asid is not needed |
|
1464 TPde* pageDir=PageDirectory(os_asid); |
|
1465 NKern::LockSystem(); |
|
1466 pde = pageDir[pdeIndex]; |
|
1467 pageDir[pdeIndex]=0; |
|
1468 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex)); |
|
1469 NKern::UnlockSystem(); |
|
1470 |
|
1471 __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex)); |
|
1472 --num_os_asids; |
|
1473 } |
|
1474 |
|
1475 // remove any aliases of the page table... |
|
1476 TUint ptId = pde>>KPageTableShift; |
|
1477 NKern::LockSystem(); |
|
1478 while(!iAliasList.IsEmpty()) |
|
1479 { |
|
1480 next = iAliasList.First()->Deque(); |
|
1481 checkedList.Add(next); |
|
1482 DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink)); |
|
1483 if((thread->iAliasPde>>KPageTableShift)==ptId && !pB->NotAllocated(thread->iAliasOsAsid,1)) |
|
1484 { |
|
1485 // the page table is being aliased by the thread, so remove it... |
|
1486 thread->iAliasPde = 0; |
|
1487 } |
|
1488 NKern::FlashSystem(); |
|
1489 } |
|
1490 } |
|
1491 |
|
1492 // copy checkedList back to iAliasList |
|
1493 iAliasList.MoveFrom(&checkedList); |
|
1494 |
|
1495 NKern::UnlockSystem(); |
|
1496 } |
|
1497 #endif |
|
1498 |
|
1499 // Initialise page table at physical address aXptPhys to be used as page table aXptId |
|
1500 // to expand the virtual address range used for mapping page tables. Map the page table |
|
1501 // at aPhysAddr as page table aId using the expanded range. |
|
1502 // Assign aXptPhys to kernel's Page Directory. |
|
1503 // Called with system unlocked and MMU mutex held. |
|
1504 void ArmMmu::BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr) |
|
1505 { |
|
1506 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::BootstrapPageTable xptid=%04x, xptphys=%08x, id=%04x, phys=%08x", |
|
1507 aXptId, aXptPhys, aId, aPhysAddr)); |
|
1508 |
|
1509 // put in a temporary mapping for aXptPhys |
|
1510 // make it noncacheable |
|
1511 TPhysAddr pa=aXptPhys&~KPageMask; |
|
1512 *iTempPte = pa | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1); |
|
1513 CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte); |
|
1514 |
|
1515 // clear XPT |
|
1516 TPte* xpt=(TPte*)(iTempAddr+(aXptPhys&KPageMask)); |
|
1517 memclr(xpt, KPageTableSize); |
|
1518 |
|
1519 // aXptPhys and aPhysAddr must in fact lie in the same physical page |
|
1520 __ASSERT_ALWAYS( TUint32(aXptPhys^aPhysAddr)<TUint32(KPageSize), MM::Panic(MM::EBootstrapPageTableBadAddr)); |
|
1521 |
|
1522 // so only need one mapping |
|
1523 xpt[(aXptId>>KPtClusterShift)&KPagesInPDEMask] = pa | KPtPtePerm; |
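
// Page tables are 1K, so a 4K page holds a cluster of four; aXptId>>KPtClusterShift |

// is the index of the physical page holding the XPT, and masking with |

// KPagesInPDEMask picks the PTE slot through which the XPT maps its own page |

// (and, by the assertion above, the page containing table aId as well). |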
|
1524 CacheMaintenance::MultiplePtesUpdated((TLinAddr)xpt, KPageTableSize); |
|
1525 |
|
1526 // remove temporary mapping |
|
1527 *iTempPte=0; |
|
1528 CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte); |
|
1529 |
|
1530 InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING); |
|
1531 |
|
1532 // initialise PtInfo... |
|
1533 TLinAddr xptAddr = PageTableLinAddr(aXptId); |
|
1534 iPtInfo[aXptId].SetGlobal(xptAddr>>KChunkShift); |
|
1535 |
|
1536 // map xpt... |
|
1537 TInt pdeIndex=TInt(xptAddr>>KChunkShift); |
|
1538 TPde* pageDir=PageDirectory(0); |
|
1539 NKern::LockSystem(); |
|
1540 pageDir[pdeIndex]=aXptPhys|KPtPdePerm; |
|
1541 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex)); |
|
1542 |
|
1543 NKern::UnlockSystem(); |
|
1544 } |
|
1545 |
|
1546 // Edit the self-mapping entry in page table aId, mapped at aTempMap, to |
|
1547 // change the physical address from aOld to aNew. Used when moving page |
|
1548 // tables which were created by BootstrapPageTable. |
|
1549 // Called with system locked and MMU mutex held. |
|
1550 void ArmMmu::FixupXPageTable(TInt aId, TLinAddr aTempMap, TPhysAddr aOld, TPhysAddr aNew) |
|
1551 { |
|
1552 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::FixupXPageTable id=%04x, tempmap=%08x, old=%08x, new=%08x", |
|
1553 aId, aTempMap, aOld, aNew)); |
|
1554 |
|
1555 // find correct page table inside the page |
|
1556 TPte* xpt=(TPte*)(aTempMap + ((aId & KPtClusterMask) << KPageTableShift)); |
|
1557 // find the pte in that page table |
|
1558 xpt += (aId>>KPtClusterShift)&KPagesInPDEMask; |
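
// i.e. the self-mapping entry set up by BootstrapPageTable for this cluster |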
|
1559 |
|
1560 // switch the mapping |
|
1561 __ASSERT_ALWAYS((*xpt&~KPageMask)==aOld, Panic(EFixupXPTFailed)); |
|
1562 *xpt = aNew | KPtPtePerm; |
|
1563 // mapped with MapTemp, and thus not mapped as a page table - have to do a real cache clean. |
|
1564 CacheMaintenance::SinglePteUpdated((TLinAddr)xpt); |
|
1565 } |
|
1566 |
|
1567 TInt ArmMmu::NewPageDirectory(TInt aOsAsid, TBool aSeparateGlobal, TPhysAddr& aPhysAddr, TInt& aNumPages) |
|
1568 { |
|
1569 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::NewPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal)); |
|
1570 TInt r=0; |
|
1571 TInt nlocal=iLocalPdSize>>KPageShift; |
|
1572 aNumPages=aSeparateGlobal ? KPageDirectorySize/KPageSize : nlocal; |
|
1573 __KTRACE_OPT(KMMU,Kern::Printf("nlocal=%d, aNumPages=%d",nlocal,aNumPages)); |
|
1574 if (aNumPages>1) |
|
1575 { |
|
1576 TInt align=aSeparateGlobal ? KPageDirectoryShift : KPageDirectoryShift-1; |
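
// The hardware requires a translation table base to be aligned to its size: |

// a separate global directory is the full KPageDirectorySize (16K), while a |

// two-page local-only directory needs only 8K alignment. |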
|
1577 r=AllocContiguousRam(aNumPages<<KPageShift, aPhysAddr, EPageFixed, align); |
|
1578 } |
|
1579 else |
|
1580 r=AllocRamPages(&aPhysAddr,1, EPageFixed); |
|
1581 __KTRACE_OPT(KMMU,Kern::Printf("r=%d, phys=%08x",r,aPhysAddr)); |
|
1582 if (r!=KErrNone) |
|
1583 return r; |
|
1584 #ifdef BTRACE_KERNEL_MEMORY |
|
1585 BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, aNumPages<<KPageShift); |
|
1586 Epoc::KernelMiscPages += aNumPages; |
|
1587 #endif |
|
1588 SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr); |
|
1589 NKern::LockSystem(); |
|
1590 TInt i; |
|
1591 for (i=0; i<aNumPages; ++i) |
|
1592 pi[i].SetPageDir(aOsAsid,i); |
|
1593 NKern::UnlockSystem(); |
|
1594 return KErrNone; |
|
1595 } |
|
1596 |
|
1597 inline void CopyPdes(TPde* aDest, const TPde* aSrc, TLinAddr aBase, TLinAddr aEnd) |
|
1598 { |
|
1599 memcpy(aDest+(aBase>>KChunkShift), aSrc+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde)); |
|
1600 CacheMaintenance::MultiplePtesUpdated((TLinAddr)(aDest+(aBase>>KChunkShift)), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde)); |
|
1601 } |
|
1602 |
|
1603 inline void ZeroPdes(TPde* aDest, TLinAddr aBase, TLinAddr aEnd) |
|
1604 { |
|
1605 memclr(aDest+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde)); |
|
1606 CacheMaintenance::MultiplePtesUpdated((TLinAddr)(aDest+(aBase>>KChunkShift)), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde)); |
|
1607 } |
|
1608 |
|
1609 void ArmMmu::InitPageDirectory(TInt aOsAsid, TBool aSeparateGlobal) |
|
1610 { |
|
1611 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::InitPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal)); |
|
1612 TPde* newpd=PageDirectory(aOsAsid); // new page directory |
|
1613 memclr(newpd, iLocalPdSize); // clear local page directory |
|
1614 CacheMaintenance::MultiplePtesUpdated((TLinAddr)newpd, iLocalPdSize); |
|
1615 if (aSeparateGlobal) |
|
1616 { |
|
1617 const TPde* kpd=(const TPde*)KPageDirectoryBase; // kernel page directory |
|
1618 if (iLocalPdSize==KPageSize) |
|
1619 ZeroPdes(newpd, KUserSharedDataEnd1GB, KUserSharedDataEnd2GB); |
|
1620 ZeroPdes(newpd, KRamDriveStartAddress, KRamDriveEndAddress); // don't copy RAM drive |
|
1621 CopyPdes(newpd, kpd, KRomLinearBase, KUserGlobalDataEnd); // copy ROM + user global |
|
1622 CopyPdes(newpd, kpd, KRamDriveEndAddress, 0x00000000); // copy kernel mappings |
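
// An end address of 0 is treated as the top of memory: TLinAddr is unsigned, |

// so (aEnd-aBase)>>KChunkShift in CopyPdes wraps to the number of megabyte |

// chunks from KRamDriveEndAddress up to 4GB. |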
|
1623 } |
|
1624 } |
|
1625 |
|
1626 void ArmMmu::ClearPageTable(TInt aId, TInt aFirstIndex) |
|
1627 { |
|
1628 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ClearPageTable(%d,%d)",aId,aFirstIndex)); |
|
1629 TPte* pte=PageTable(aId); |
|
1630 memclr(pte+aFirstIndex, KPageTableSize-aFirstIndex*sizeof(TPte)); |
|
1631 CacheMaintenance::MultiplePtesUpdated((TLinAddr)(pte+aFirstIndex), KPageTableSize-aFirstIndex*sizeof(TPte)); |
|
1632 } |
|
1633 |
|
1634 void ArmMmu::ApplyTopLevelPermissions(TLinAddr aAddr, TInt aOsAsid, TInt aNumPdes, TPde aPdePerm) |
|
1635 { |
|
1636 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyTopLevelPermissions %04x:%08x->%08x count %d", |
|
1637 aOsAsid, aAddr, aPdePerm, aNumPdes)); |
|
1638 TInt ix=aAddr>>KChunkShift; |
|
1639 TPde* pPde=PageDirectory(aOsAsid)+ix; |
|
1640 TLinAddr firstPde = (TLinAddr)pPde; //Will need this to clean page table memory region in cache |
|
1641 |
|
1642 TPde* pPdeEnd=pPde+aNumPdes; |
|
1643 NKern::LockSystem(); |
|
1644 for (; pPde<pPdeEnd; ++pPde) |
|
1645 { |
|
1646 TPde pde=*pPde; |
|
1647 if (pde) |
|
1648 *pPde = (pde&KPdePageTableAddrMask)|aPdePerm; |
|
1649 } |
|
1650 CacheMaintenance::MultiplePtesUpdated(firstPde, aNumPdes*sizeof(TPde)); |
|
1651 FlushTLBs(); |
|
1652 NKern::UnlockSystem(); |
|
1653 } |
|
1654 |
|
1655 void ArmMmu::ApplyPagePermissions(TInt aId, TInt aPageOffset, TInt aNumPages, TPte aPtePerm) |
|
1656 { |
|
1657 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyPagePermissions %04x:%03x+%03x perm %08x", |
|
1658 aId, aPageOffset, aNumPages, aPtePerm)); |
|
1659 TPte* pPte=PageTable(aId)+aPageOffset; |
|
1660 TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table memory region in cache |
|
1661 |
|
1662 TPte* pPteEnd=pPte+aNumPages; |
|
1663 NKern::LockSystem(); |
|
1664 for (; pPte<pPteEnd; ++pPte) |
|
1665 { |
|
1666 TPte pte=*pPte; |
|
1667 if (pte) |
|
1668 *pPte = (pte&KPteSmallPageAddrMask)|aPtePerm; |
|
1669 } |
|
1670 CacheMaintenance::MultiplePtesUpdated(firstPte, aNumPages*sizeof(TPte)); |
|
1671 FlushTLBs(); |
|
1672 NKern::UnlockSystem(); |
|
1673 } |
|
1674 |
|
1675 void ArmMmu::ClearRamDrive(TLinAddr aStart) |
|
1676 { |
|
1677 // clear the page directory entries corresponding to the RAM drive |
|
1678 TPde* kpd=(TPde*)KPageDirectoryBase; // kernel page directory |
|
1679 ZeroPdes(kpd, aStart, KRamDriveEndAddress); |
|
1680 } |
|
1681 |
|
1682 TPde ArmMmu::PdePermissions(TChunkType aChunkType, TBool aRO) |
|
1683 { |
|
1684 // if (aChunkType==EUserData && aRO) |
|
1685 // return KPdePtePresent|KPdePteUser; |
|
1686 return ChunkPdePermissions[aChunkType]; |
|
1687 } |
|
1688 |
|
1689 TPte ArmMmu::PtePermissions(TChunkType aChunkType) |
|
1690 { |
|
1691 return ChunkPtePermissions[aChunkType]; |
|
1692 } |
|
1693 |
|
1694 // Set up a page table (specified by aId) to map a 1Mb section of ROM containing aRomAddr |
|
1695 // using ROM at aOrigPhys. |
|
1696 void ArmMmu::InitShadowPageTable(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys) |
|
1697 { |
|
1698 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPageTable id=%04x aRomAddr=%08x aOrigPhys=%08x", |
|
1699 aId, aRomAddr, aOrigPhys)); |
|
1700 TPte* ppte = PageTable(aId); |
|
1701 TLinAddr firstPte = (TLinAddr)ppte; //Will need this to clean page table memory region in cache |
|
1702 |
|
1703 TPte* ppte_End = ppte + KChunkSize/KPageSize; |
|
1704 TPhysAddr phys = aOrigPhys - (aRomAddr & KChunkMask); |
|
1705 for (; ppte<ppte_End; ++ppte, phys+=KPageSize) |
|
1706 *ppte = phys | KRomPtePerm; |
|
1707 CacheMaintenance::MultiplePtesUpdated(firstPte, sizeof(TPte)*KChunkSize/KPageSize); |
|
1708 } |
|
1709 |
|
1710 // Copy the contents of ROM at aRomAddr to a shadow page at physical address aShadowPhys |
|
1711 // It is assumed the shadow page is not yet mapped, therefore any mapping colour is OK. |
|
1712 void ArmMmu::InitShadowPage(TPhysAddr aShadowPhys, TLinAddr aRomAddr) |
|
1713 { |
|
1714 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPage aShadowPhys=%08x aRomAddr=%08x", |
|
1715 aShadowPhys, aRomAddr)); |
|
1716 |
|
1717 // put in a temporary mapping for aShadowPhys |
|
1718 // make it noncacheable |
|
1719 *iTempPte = aShadowPhys | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1); |
|
1720 CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte); |
|
1721 |
|
1722 // copy contents of ROM |
|
1723 wordmove( (TAny*)iTempAddr, (const TAny*)aRomAddr, KPageSize ); |
|
1724 //Temp address is uncached. No need to clean cache, just flush write buffer |
|
1725 CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)iTempAddr, KPageSize, EMapAttrBufferedC); |
|
1726 |
|
1727 // remove temporary mapping |
|
1728 *iTempPte=0; |
|
1729 CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte); |
|
1730 InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING); |
|
1731 } |
|
1732 |
|
1733 // Assign a shadow page table to replace a ROM section mapping |
|
1734 // Enter and return with system locked |
|
1735 void ArmMmu::AssignShadowPageTable(TInt aId, TLinAddr aRomAddr) |
|
1736 { |
|
1737 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:AssignShadowPageTable aId=%04x aRomAddr=%08x", |
|
1738 aId, aRomAddr)); |
|
1739 TLinAddr ptLin=PageTableLinAddr(aId); |
|
1740 TPhysAddr ptPhys=LinearToPhysical(ptLin, 0); |
|
1741 TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift); |
|
1742 TPde newpde = ptPhys | KShadowPdePerm; |
|
1743 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde)); |
|
1744 TInt irq=NKern::DisableAllInterrupts(); |
|
1745 *ppde = newpde; // map in the page table |
|
1746 CacheMaintenance::SinglePteUpdated((TLinAddr)ppde); |
|
1747 |
|
1748 FlushTLBs(); // flush both TLBs (no need to flush cache yet) |
|
1749 NKern::RestoreInterrupts(irq); |
|
1750 } |
|
1751 |
|
1752 void ArmMmu::DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys) |
|
1753 { |
|
1754 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:DoUnmapShadowPage, id=%04x lin=%08x origphys=%08x", aId, aRomAddr, aOrigPhys)); |
|
1755 TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift); |
|
1756 TPte newpte = aOrigPhys | KRomPtePerm; |
|
1757 __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte)); |
|
1758 TInt irq=NKern::DisableAllInterrupts(); |
|
1759 *ppte = newpte; |
|
1760 CacheMaintenance::SinglePteUpdated((TLinAddr)ppte); |
|
1761 |
|
1762 InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING); |
|
1763 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED) |
|
1764 __FlushBtb(); |
|
1765 #endif |
|
1766 |
|
1767 CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap); |
|
1768 CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid); |
|
1769 NKern::RestoreInterrupts(irq); |
|
1770 } |
|
1771 |
|
1772 TInt ArmMmu::UnassignShadowPageTable(TLinAddr aRomAddr, TPhysAddr aOrigPhys) |
|
1773 { |
|
1774 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:UnassignShadowPageTable, lin=%08x origphys=%08x", aRomAddr, aOrigPhys)); |
|
1775 TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift); |
|
1776 TPde newpde = (aOrigPhys &~ KChunkMask) | KRomSectionPermissions; |
|
1777 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde)); |
|
1778 TInt irq=NKern::DisableAllInterrupts(); |
|
1779 *ppde = newpde; // revert to section mapping |
|
1780 CacheMaintenance::SinglePteUpdated((TLinAddr)ppde); |
|
1781 |
|
1782 FlushTLBs(); // flush both TLBs |
|
1783 NKern::RestoreInterrupts(irq); |
|
1784 return KErrNone; |
|
1785 } |
|
1786 |
|
1787 |
|
1788 #if defined(__CPU_MEMORY_TYPE_REMAPPING) // arm1176, arm11mcore, armv7, ... |
|
1789 /** |
|
1790 Shadow pages on platforms with remapping (mpcore, 1176, cortex...) are not writable. |
|
1791 This will map the region into writable memory first. |
|
1792 @pre No Fast Mutex held |
|
1793 */ |
|
1794 TInt ArmMmu::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength) |
|
1795 { |
|
1796 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:CopyToShadowMemory aDest=%08x aSrc=%08x aLength=%08x", aDest, aSrc, aLength)); |
|
1797 |
|
1798 // Check that destination is ROM |
|
1799 if (aDest<iRomLinearBase || (aDest+aLength) > iRomLinearEnd) |
|
1800 { |
|
1801 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:CopyToShadowMemory: Destination not entirely in ROM")); |
|
1802 return KErrArgument; |
|
1803 } |
|
1804 // do operation with RamAlloc mutex held (to prevent shadow pages from being released from under us) |
|
1805 MmuBase::Wait(); |
|
1806 |
|
1807 |
|
1808 TInt r = KErrNone; |
|
1809 while (aLength) |
|
1810 { |
|
1811 // Calculate the size to copy in this iteration; at most a single page region is copied per pass |
|
1812 TInt copySize = Min(aLength, iPageSize - (aDest&iPageMask)); |
|
1813 |
|
1814 // Get physical address |
|
1815 TPhysAddr physAddr = LinearToPhysical(aDest&~iPageMask, 0); |
|
1816 if (KPhysAddrInvalid==physAddr) |
|
1817 { |
|
1818 r = KErrArgument; |
|
1819 break; |
|
1820 } |
|
1821 |
|
1822 //check whether it is shadowed rom |
|
1823 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr); |
|
1824 if (pi==0 || pi->Type()!=SPageInfo::EShadow) |
|
1825 { |
|
1826 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:CopyToShadowMemory: No shadow page at this address")); |
|
1827 r = KErrArgument; |
|
1828 break; |
|
1829 } |
|
1830 |
|
1831 //Temporarily map into writable memory and copy data. RamAllocator DMutex is required |
|
1832 TLinAddr tempAddr = MapTemp (physAddr, aDest&~iPageMask); |
|
1833 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:CopyToShadowMemory Copy aDest=%08x aSrc=%08x aSize=%08x", tempAddr+(aDest&iPageMask), aSrc, copySize)); |
|
1834 memcpy ((TAny*)(tempAddr+(aDest&iPageMask)), (const TAny*)aSrc, copySize); //Kernel-to-Kernel copy is presumed |
|
1835 UnmapTemp(); |
|
1836 |
|
1837 //Update variables for the next loop/page |
|
1838 aDest+=copySize; |
|
1839 aSrc+=copySize; |
|
1840 aLength-=copySize; |
|
1841 } |
|
1842 MmuBase::Signal(); |
|
1843 return r; |
|
1844 } |
|
1845 #endif |
|
1846 |
|
1847 void ArmMmu::DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr) |
|
1848 { |
|
1849 #if defined(__CPU_MEMORY_TYPE_REMAPPING) //arm1176, arm11mcore, armv7 and later |
|
1850 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage not required with MEMORY_TYPE_REMAPPING")); |
|
1851 #else |
|
1852 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage aId=%04x aRomAddr=%08x", |
|
1853 aId, aRomAddr)); |
|
1854 TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift); |
|
1855 TPte newpte = (*ppte & KPteSmallPageAddrMask) | KRomPtePerm; |
|
1856 __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte)); |
|
1857 *ppte = newpte; |
|
1858 CacheMaintenance::SinglePteUpdated((TLinAddr)ppte); |
|
1859 InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING); |
|
1860 #endif |
|
1861 } |
|
1862 |
|
1863 /** Replaces large page(64K) entry in page table with small page(4K) entries.*/ |
|
1864 void ArmMmu::Pagify(TInt aId, TLinAddr aLinAddr) |
|
1865 { |
|
1866 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:Pagify aId=%04x aLinAddr=%08x", aId, aLinAddr)); |
|
1867 |
|
1868 TInt pteIndex = (aLinAddr & KChunkMask)>>KPageShift; |
|
1869 TPte* pte = PageTable(aId); |
|
1870 if ((pte[pteIndex] & KArmV6PteTypeMask) == KArmV6PteLargePage) |
|
1871 { |
|
1872 __KTRACE_OPT(KMMU,Kern::Printf("Converting 64K page to 4K pages")); |
|
1873 pteIndex &= ~0xf; |
|
1874 TPte source = pte[pteIndex]; |
|
1875 source = (source & KPteLargePageAddrMask) | SP_PTE_FROM_LP_PTE(source); |
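
// A 64K page occupies 16 consecutive PTE slots holding identical entries, so |

// pteIndex was rounded down to the start of the group; each replacement 4K |

// entry below gets its own page offset via entry<<12. |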
|
1876 pte += pteIndex; |
|
1877 for (TInt entry=0; entry<16; entry++) |
|
1878 { |
|
1879 pte[entry] = source | (entry<<12); |
|
1880 } |
|
1881 CacheMaintenance::MultiplePtesUpdated((TLinAddr)pte, 16*sizeof(TPte)); |
|
1882 FlushTLBs(); |
|
1883 } |
|
1884 } |
|
1885 |
|
1886 void ArmMmu::FlushShadow(TLinAddr aRomAddr) |
|
1887 { |
|
1888 CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap); |
|
1889 CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid); |
|
1890 InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING); // remove all TLB references to original ROM page |
|
1891 } |
|
1892 |
|
1893 |
|
1894 #if defined(__CPU_MEMORY_TYPE_REMAPPING) //arm1176, arm11mcore, armv7 |
|
1895 /** |
|
1896 Calculates page directory/table entries for memory type described in aMapAttr. |
|
1897 Global, small page (4KB) mapping is assumed. |
|
1898 (All magic numbers come from ARM page table descriptions.) |
|
1899 @param aMapAttr On entry, holds a description (memory type, access permissions, ...) of the memory. |

1900 It is made up of TMappingAttributes constants or a TMappingAttributes2 object. If it is a |

1901 TMappingAttributes bitmask, it may be altered on exit to hold the actual cache attributes & access permissions. |
|
1902 @param aPde On exit, holds the 1st level descriptor (page directory entry) |

1903 for the given type of memory, with base address set to 0. |

1904 @param aPte On exit, holds the 2nd level small-page (4K) descriptor |

1905 for the given type of memory, with base address set to 0. |
|
1906 @return KErrNotSupported If memory described in aMapAttr is not supported |
|
1907 KErrNone Otherwise |
|
1908 */ |
|
1909 TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte) |
|
1910 { |
|
1911 __KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr)); |
|
1912 |
|
1913 TMappingAttributes2& memory = (TMappingAttributes2&)aMapAttr; |
|
1914 |
|
1915 if(memory.ObjectType2()) |
|
1916 { |
|
1917 //---------Memory described by TMappingAttributes2 object----------------- |
|
1918 aPde = KArmV6PdePageTable | |
|
1919 (memory.Parity() ? KArmV6PdeECCEnable : 0); |
|
1920 #if defined(FAULTY_NONSHARED_DEVICE_MEMORY) |
|
1921 if(!memory.Shared() && (memory.Type() == EMemAttDevice )) |
|
1922 { |
|
1923 aMapAttr ^= EMapAttrBufferedNC; |
|
1924 aMapAttr |= EMapAttrFullyBlocking; |
|
1925 // Clear EMemAttDevice |
|
1926 aMapAttr ^= (EMemAttDevice << 26); |
|
1927 aMapAttr |= (EMemAttStronglyOrdered << 26); |
|
1928 } |
|
1929 #endif |
|
1930 aPte = KArmV6PteSmallPage | |
|
1931 KArmV6PteAP0 | // AP0 bit always 1 |
|
1932 ((memory.Type()&3)<<2) | ((memory.Type()&4)<<4) | // memory type |
|
1933 (memory.Executable() ? 0 : KArmV6PteSmallXN) | // eXecuteNever bit |
|
1934 #if defined (__CPU_USE_SHARED_MEMORY) |
|
1935 KArmV6PteS | // Memory is always shared. |
|
1936 #else |
|
1937 (memory.Shared() ? KArmV6PteS : 0) | // Shared bit |
|
1938 #endif |
|
1939 (memory.Writable() ? 0 : KArmV6PteAPX) | // APX = !Writable |
|
1940 (memory.UserAccess() ? KArmV6PteAP1: 0); // AP1 = UserAccess |
|
1941 // aMapAttr remains the same |
|
1942 } |
|
1943 else |
|
1944 { |
|
1945 //---------Memory described by TMappingAttributes bitmask----------------- |
|
1946 #if defined(FAULTY_NONSHARED_DEVICE_MEMORY) |
|
1947 if(((aMapAttr & EMapAttrL1CacheMask) == EMapAttrBufferedNC) && !(aMapAttr & EMapAttrShared)) |
|
1948 { |
|
1949 // Clear EMapAttrBufferedNC attribute |
|
1950 aMapAttr ^= EMapAttrBufferedNC; |
|
1951 aMapAttr |= EMapAttrFullyBlocking; |
|
1952 } |
|
1953 #endif |
|
1954 // 1. Calculate TEX0:C:B bits in page table and actual cache attributes. |
|
1955 // Only L1 cache attribute from aMapAttr matters. Outer (L2) cache policy will be the same as inner one. |
|
1956 TUint l1cache=aMapAttr & EMapAttrL1CacheMask; // Inner cache attributes. May change to actual value. |
|
1957 TUint l2cache; // Will hold actual L2 cache attributes (in terms of TMappingAttributes constants) |
|
1958 TUint tex0_c_b; // Will hold TEX[0]:C:B value in page table |
|
1959 |
|
1960 switch (l1cache) |
|
1961 { |
|
1962 case EMapAttrFullyBlocking: |
|
1963 tex0_c_b = EMemAttStronglyOrdered; |
|
1964 l2cache = EMapAttrL2Uncached; |
|
1965 break; |
|
1966 case EMapAttrBufferedNC: |
|
1967 tex0_c_b = EMemAttDevice; |
|
1968 l2cache = EMapAttrL2Uncached; |
|
1969 break; |
|
1970 case EMapAttrBufferedC: |
|
1971 case EMapAttrL1Uncached: |
|
1972 case EMapAttrCachedWTRA: |
|
1973 case EMapAttrCachedWTWA: |
|
1974 tex0_c_b = EMemAttNormalUncached; |
|
1975 l1cache = EMapAttrBufferedC; |
|
1976 l2cache = EMapAttrL2Uncached; |
|
1977 break; |
|
1978 case EMapAttrCachedWBRA: |
|
1979 case EMapAttrCachedWBWA: |
|
1980 case EMapAttrL1CachedMax: |
|
1981 tex0_c_b = EMemAttNormalCached; |
|
1982 l1cache = EMapAttrCachedWBWA; |
|
1983 l2cache = EMapAttrL2CachedWBWA; |
|
1984 break; |
|
1985 default: |
|
1986 return KErrNotSupported; |
|
1987 } |
|
1988 |
|
1989 // 2. Step 2 has been removed :) |
|
1990 |
|
1991 // 3. Calculate access permissions (apx:ap bits in page table + eXecute it) |
|
1992 TUint read=aMapAttr & EMapAttrReadMask; |
|
1993 TUint write=(aMapAttr & EMapAttrWriteMask)>>4; |
|
1994 TUint exec=(aMapAttr & EMapAttrExecMask)>>8; |
|
1995 |
|
1996 read|=exec; // User/Sup execute access requires User/Sup read access. |
|
1997 if (exec) exec = 1; // There is a single eXecute bit in page table. Set to one if User or Sup exec is required. |
|
1998 |
|
1999 TUint apxap=0; |
|
2000 if (write==0) // no write required |
|
2001 { |
|
2002 if (read>=4) apxap=KArmV6PermRORO; // user read required |
|
2003 else if (read==1) apxap=KArmV6PermRONO; // supervisor read required |
|
2004 else return KErrNotSupported; // no read required |
|
2005 } |
|
2006 else if (write<4) // supervisor write required |
|
2007 { |
|
2008 if (read<4) apxap=KArmV6PermRWNO; // user read not required |
|
2009 else return KErrNotSupported; // user read required |
|
2010 } |
|
2011 else // user & supervisor writes required |
|
2012 { |
|
2013 apxap=KArmV6PermRWRW; |
|
2014 } |
|
2015 |
|
2016 // 4. Calculate page-table-entry for the 1st level (aka page directory) descriptor |
|
2017 aPde=((aMapAttr&EMapAttrUseECC)>>8)|KArmV6PdePageTable; |
|
2018 |
|
2019 // 5. Calculate small-page-entry for the 2nd level (aka page table) descriptor |
|
2020 aPte=SP_PTE(apxap, tex0_c_b, exec, 1); // always global |
|
2021 if (aMapAttr&EMapAttrShared) |
|
2022 aPte |= KArmV6PteS; |
|
2023 |
|
2024 // 6. Fix aMapAttr to hold the actual values for access permission & cache attributes |
|
2025 TUint xnapxap=((aPte<<3)&8)|((aPte>>7)&4)|((aPte>>4)&3); |
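
// Builds the XN:APX:AP[1:0] index used by PermissionLookup: XN is PTE bit 0 |

// (moved up to bit 3), APX is bit 9 (down to bit 2) and AP[1:0] are bits 5:4 |

// (down to bits 1:0). |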
|
2026 aMapAttr &= ~(EMapAttrAccessMask|EMapAttrL1CacheMask|EMapAttrL2CacheMask); |
|
2027 aMapAttr |= PermissionLookup[xnapxap]; // Set actual access permissions |
|
2028 aMapAttr |= l1cache; // Set actual inner cache attributes |
|
2029 aMapAttr |= l2cache; // Set actual outer cache attributes |
|
2030 } |
|
2031 |
|
2032 __KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, mapattr=%08x, pde=%08x, pte=%08x", aMapAttr, aPde, aPte)); |
|
2033 return KErrNone; |
|
2034 } |
|
2035 |
|
2036 #else //ARMv6 (arm1136) |
|
2037 |
|
2038 const TUint FBLK=(EMapAttrFullyBlocking>>12); |
|
2039 const TUint BFNC=(EMapAttrBufferedNC>>12); |
|
2040 //const TUint BUFC=(EMapAttrBufferedC>>12); |
|
2041 const TUint L1UN=(EMapAttrL1Uncached>>12); |
|
2042 const TUint WTRA=(EMapAttrCachedWTRA>>12); |
|
2043 //const TUint WTWA=(EMapAttrCachedWTWA>>12); |
|
2044 const TUint WBRA=(EMapAttrCachedWBRA>>12); |
|
2045 const TUint WBWA=(EMapAttrCachedWBWA>>12); |
|
2046 const TUint AWTR=(EMapAttrAltCacheWTRA>>12); |
|
2047 //const TUint AWTW=(EMapAttrAltCacheWTWA>>12); |
|
2048 //const TUint AWBR=(EMapAttrAltCacheWBRA>>12); |
|
2049 const TUint AWBW=(EMapAttrAltCacheWBWA>>12); |
|
2050 const TUint MAXC=(EMapAttrL1CachedMax>>12); |
|
2051 |
|
2052 const TUint L2UN=(EMapAttrL2Uncached>>16); |
|
2053 |
|
2054 const TUint8 UNS=0xffu; // Unsupported attribute |
|
2055 |
|
2056 //Maps L1 & L2 cache attributes into TEX[4:2]:CB[1:0] |
|
2057 //ARMv6 doesn't do WTWA so we use WTRA instead |
|
2058 |
|
2059 #if !defined(__CPU_ARM1136_ERRATUM_399234_FIXED) |
|
2060 // L1 Write-Through mode is outlawed, L1WT acts as L1UN. |
|
2061 static const TUint8 CBTEX[40]= |
|
2062 { // L1CACHE: |
|
2063 // FBLK BFNC BUFC L1UN WTRA WTWA WBRA WBWA L2CACHE: |
|
2064 0x00, 0x01, 0x01, 0x04, 0x04, 0x04, 0x13, 0x11, //NC |
|
2065 0x00, 0x01, 0x01, 0x18, 0x18, 0x18, 0x1b, 0x19, //WTRA |
|
2066 0x00, 0x01, 0x01, 0x18, 0x18, 0x18, 0x1b, 0x19, //WTWA |
|
2067 0x00, 0x01, 0x01, 0x1c, 0x1c, 0x1c, 0x1f, 0x1d, //WBRA |
|
2068 0x00, 0x01, 0x01, 0x14, 0x14, 0x14, 0x17, 0x15 //WBWA |
|
2069 }; |
|
2070 #else |
|
2071 static const TUint8 CBTEX[40]= |
|
2072 { // L1CACHE: |
|
2073 // FBLK BFNC BUFC L1UN WTRA WTWA WBRA WBWA L2CACHE: |
|
2074 0x00, 0x01, 0x01, 0x04, 0x12, 0x12, 0x13, 0x11, //NC |
|
2075 0x00, 0x01, 0x01, 0x18, 0x02, 0x02, 0x1b, 0x19, //WTRA |
|
2076 0x00, 0x01, 0x01, 0x18, 0x02, 0x02, 0x1b, 0x19, //WTWA |
|
2077 0x00, 0x01, 0x01, 0x1c, 0x1e, 0x1e, 0x1f, 0x1d, //WBRA |
|
2078 0x00, 0x01, 0x01, 0x14, 0x16, 0x16, 0x17, 0x15 //WBWA |
|
2079 }; |
|
2080 #endif |
|
2081 |
|
2082 //Maps TEX[4:2]:CB[1:0] value into L1 cache attributes |
|
2083 static const TUint8 L1Actual[32]= |
|
2084 { |
|
2085 //CB 00 01 10 11 //TEX |
|
2086 FBLK, BFNC, WTRA, WBRA, //000 |
|
2087 L1UN, UNS, UNS, WBWA, //001 |
|
2088 BFNC, UNS, UNS, UNS, //010 |
|
2089 UNS, UNS, UNS, UNS, //011 |
|
2090 L1UN, WBWA, WTRA, WBRA, //100 |
|
2091 L1UN, WBWA, WTRA, WBRA, //101 |
|
2092 L1UN, WBWA, WTRA, WBRA, //110 |
|
2093 L1UN, WBWA, WTRA, WBRA //111 |
|
2094 }; |
|
2095 |
|
2096 //Maps TEX[4:2]:CB[1:0] value into L2 cache attributes |
|
2097 static const TUint8 L2Actual[32]= |
|
2098 { |
|
2099 //CB 00 01 10 11 //TEX |
|
2100 L2UN, L2UN, WTRA, WBRA, //000 |
|
2101 L2UN, UNS, UNS, WBWA, //001 |
|
2102 L2UN, UNS, UNS, UNS, //010 |
|
2103 UNS, UNS, UNS, UNS, //011 |
|
2104 L2UN, L2UN, L2UN, L2UN, //100 |
|
2105 WBWA, WBWA, WBWA, WBWA, //101 |
|
2106 WTRA, WTRA, WTRA, WTRA, //110 |
|
2107 WBRA, WBRA, WBRA, WBRA //111 |
|
2108 }; |
|
2109 |
|
2110 TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte) |
|
2111 { |
|
2112 __KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr)); |
|
2113 |
|
2114 TUint read=aMapAttr & EMapAttrReadMask; |
|
2115 TUint write=(aMapAttr & EMapAttrWriteMask)>>4; |
|
2116 TUint exec=(aMapAttr & EMapAttrExecMask)>>8; |
|
2117 TUint l1cache=(aMapAttr & EMapAttrL1CacheMask)>>12; |
|
2118 TUint l2cache=(aMapAttr & EMapAttrL2CacheMask)>>16; |
|
2119 if (l1cache==MAXC) l1cache=WBRA; // map max cache to WBRA |
|
2120 if (l1cache>AWBW) |
|
2121 return KErrNotSupported; // undefined attribute |
|
2122 if (l1cache>=AWTR) l1cache-=4; // no alternate cache, so use normal cache |
|
2123 if (l1cache<L1UN) l2cache=0; // for blocking/device, don't cache L2 |
|
2124 if (l2cache==MAXC) l2cache=WBRA; // map max cache to WBRA |
|
2125 if (l2cache>WBWA) |
|
2126 return KErrNotSupported; // undefined attribute |
|
2127 if (l2cache) l2cache-=(WTRA-1); // l2cache now in range 0-4 |
|
2128 aPde=((aMapAttr&EMapAttrUseECC)>>8)|KArmV6PdePageTable; |
|
2129 |
|
2130 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED) |
|
2131 // if broken 1136, can't have supervisor only code |
|
2132 if (exec) |
|
2133 exec = TUint(EMapAttrExecUser>>8); |
|
2134 #endif |
|
2135 |
|
2136 // if any execute access, must have read=execute |
|
2137 if (exec) |
|
2138 (void)(read>=exec || (read=exec)!=0), exec=1; |
|
2139 |
|
2140 // l1cache between 0 and 7, l2cache between 0 and 4; look up CBTEX |
|
2141 TUint cbtex=CBTEX[(l2cache<<3)|l1cache]; |
|
2142 |
|
2143 // work out apx:ap |
|
2144 TUint apxap; |
|
2145 if (write==0) |
|
2146 apxap=(read>=4)?KArmV6PermRORO:(read?KArmV6PermRONO:KArmV6PermNONO); |
|
2147 else if (write<4) |
|
2148 apxap=(read>=4)?KArmV6PermRWRO:KArmV6PermRWNO; |
|
2149 else |
|
2150 apxap=KArmV6PermRWRW; |
|
2151 TPte pte=SP_PTE(apxap, cbtex, exec, 1); // always global |
|
2152 if (aMapAttr&EMapAttrShared) |
|
2153 pte |= KArmV6PteS; |
|
2154 |
|
2155 // Translate back to get actual map attributes |
|
2156 TUint xnapxap=((pte<<3)&8)|((pte>>7)&4)|((pte>>4)&3); |
|
2157 cbtex=((pte>>4)&0x1c)|((pte>>2)&3); // = TEX[4:2]::CB[1:0] |
|
2158 aMapAttr &= ~(EMapAttrAccessMask|EMapAttrL1CacheMask|EMapAttrL2CacheMask); |
|
2159 aMapAttr |= PermissionLookup[xnapxap]; |
|
2160 aMapAttr |= (L1Actual[cbtex]<<12); |
|
2161 aMapAttr |= (L2Actual[cbtex]<<16); |
|
2162 aPte=pte; |
|
2163 __KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, mapattr=%08x, pde=%08x, pte=%08x", |
|
2164 aMapAttr, aPde, aPte)); |
|
2165 return KErrNone; |
|
2166 } |
|
2167 #endif |
|
2168 |
|
2169 void ArmMmu::Map(TLinAddr aLinAddr, TPhysAddr aPhysAddr, TInt aSize, TPde aPdePerm, TPte aPtePerm, TInt aMapShift) |
|
2170 // |
|
2171 // Map a region of physical addresses aPhysAddr to aPhysAddr+aSize-1 to virtual address aLinAddr. |
|
2172 // Use permissions specified by aPdePerm and aPtePerm. Use mapping sizes up to and including (1<<aMapShift). |
|
2173 // Assume any page tables required are already assigned. |
|
2174 // aLinAddr, aPhysAddr, aSize must be page-aligned. |
|
2175 // |
|
2176 { |
|
2177 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Map lin=%08x phys=%08x size=%08x", aLinAddr, aPhysAddr, aSize)); |
|
2178 __KTRACE_OPT(KMMU, Kern::Printf("pde=%08x pte=%08x mapshift=%d", aPdePerm, aPtePerm, aMapShift)); |
|
2179 TPde pt_pde=aPdePerm; |
|
2180 TPte sp_pte=aPtePerm; |
|
2181 TPde section_pde=SECTION_PDE_FROM_PDEPTE(pt_pde, sp_pte); |
|
2182 TPte lp_pte=LP_PTE_FROM_SP_PTE(sp_pte); |
|
2183 TLinAddr la=aLinAddr; |
|
2184 TPhysAddr pa=aPhysAddr; |
|
2185 TInt remain=aSize; |
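
// Each pass maps the largest block the alignment and aMapShift allow: a 1MB |

// section when chunk-aligned with at least 1MB left, 64K large pages when |

// 64K-aligned, otherwise 4K pages up to the next boundary. For example, given |

// a sufficient aMapShift, a chunk-aligned 0x340000 byte region goes as three |

// sections followed by four 64K pages. |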
|
2186 while (remain) |
|
2187 { |
|
2188 if (aMapShift>=KChunkShift && (la & KChunkMask)==0 && remain>=KChunkSize) |
|
2189 { |
|
2190 // use sections - ASSUMES ADDRESS IS IN GLOBAL REGION |
|
2191 TInt npdes=remain>>KChunkShift; |
|
2192 const TBitMapAllocator& b=*iOsAsidAllocator; |
|
2193 TInt num_os_asids=iNumGlobalPageDirs; |
|
2194 TInt os_asid=0; |
|
2195 for (; num_os_asids; ++os_asid) |
|
2196 { |
|
2197 if (b.NotAllocated(os_asid,1) || (iAsidInfo[os_asid]&1)==0) |
|
2198 continue; // os_asid is not needed |
|
2199 TPde* p_pde=PageDirectory(os_asid)+(la>>KChunkShift); |
|
2200 TPde* p_pde_E=p_pde+npdes; |
|
2201 TPde pde=pa|section_pde; |
|
2202 TLinAddr firstPde = (TLinAddr)p_pde; //Will need this to clean page table memory region from cache |
|
2203 |
|
2204 NKern::LockSystem(); |
|
2205 for (; p_pde < p_pde_E; pde+=KChunkSize) |
|
2206 { |
|
2207 __ASSERT_DEBUG(*p_pde==0, MM::Panic(MM::EPdeAlreadyInUse)); |
|
2208 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, p_pde)); |
|
2209 *p_pde++=pde; |
|
2210 } |
|
2211 CacheMaintenance::MultiplePtesUpdated(firstPde, (TUint)p_pde-firstPde); |
|
2212 NKern::UnlockSystem(); |
|
2213 --num_os_asids; |
|
2214 } |
|
2215 npdes<<=KChunkShift; |
|
2216 la+=npdes, pa+=npdes, remain-=npdes; |
|
2217 continue; |
|
2218 } |
|
2219 TInt block_size = Min(remain, KChunkSize-(la&KChunkMask)); |
|
2220 TPte pa_mask=~KPageMask; |
|
2221 TPte pte_perm=sp_pte; |
|
2222 if (aMapShift>=KLargePageShift && block_size>=KLargePageSize) |
|
2223 { |
|
2224 if ((la & KLargePageMask)==0) |
|
2225 { |
|
2226 // use 64K large pages |
|
2227 pa_mask=~KLargePageMask; |
|
2228 pte_perm=lp_pte; |
|
2229 } |
|
2230 else |
|
2231 block_size = Min(remain, KLargePageSize-(la&KLargePageMask)); |
|
2232 } |
|
2233 block_size &= pa_mask; |
|
2234 |
|
2235 // use pages (large or small) |
|
2236 TInt id=PageTableId(la, 0); |
|
2237 __ASSERT_DEBUG(id>=0, MM::Panic(MM::EMmuMapNoPageTable)); |
|
2238 TPte* p_pte=PageTable(id)+((la&KChunkMask)>>KPageShift); |
|
2239 TPte* p_pte_E=p_pte + (block_size>>KPageShift); |
|
2240 SPageTableInfo& ptinfo=iPtInfo[id]; |
|
2241 TLinAddr firstPte = (TLinAddr)p_pte; //Will need this to clean page table memory region from cache |
|
2242 |
|
2243 NKern::LockSystem(); |
|
2244 for (; p_pte < p_pte_E; pa+=KPageSize) |
|
2245 { |
|
2246 __ASSERT_DEBUG(*p_pte==0, MM::Panic(MM::EPteAlreadyInUse)); |
|
2247 TPte pte = (pa & pa_mask) | pte_perm; |
|
2248 __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", pte, p_pte)); |
|
2249 *p_pte++=pte; |
|
2250 ++ptinfo.iCount; |
|
2251 NKern::FlashSystem(); |
|
2252 } |
|
2253 CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)p_pte-firstPte); |
|
2254 NKern::UnlockSystem(); |
|
2255 la+=block_size, remain-=block_size; |
|
2256 } |
|
2257 } |
|
2258 |
|
2259 void ArmMmu::Unmap(TLinAddr aLinAddr, TInt aSize) |
|
2260 // |
|
2261 // Remove all mappings in the specified range of addresses. |
|
2262 // Assumes there are only global mappings involved. |
|
2263 // Don't free page tables. |
|
2264 // aLinAddr, aSize must be page-aligned. |
|
2265 // |
|
2266 { |
|
2267 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Unmap lin=%08x size=%08x", aLinAddr, aSize)); |
|
2268 TLinAddr a=aLinAddr; |
|
2269 TLinAddr end=a+aSize; |
|
2270 __KTRACE_OPT(KMMU,Kern::Printf("a=%08x end=%08x",a,end)); |
|
2271 NKern::LockSystem(); |
|
2272 while(a!=end) |
|
2273 { |
|
2274 TInt pdeIndex=a>>KChunkShift; |
|
2275 TLinAddr next=(pdeIndex<<KChunkShift)+KChunkSize; |
|
2276 TInt to_do = Min(TInt(end-a), TInt(next-a))>>KPageShift; |
|
2277 __KTRACE_OPT(KMMU,Kern::Printf("a=%08x next=%08x to_do=%d",a,next,to_do)); |
|
2278 TPde pde=::InitPageDirectory[pdeIndex]; |
|
2279 if ( (pde&KArmV6PdeTypeMask)==KArmV6PdeSection ) |
|
2280 { |
|
2281 __ASSERT_DEBUG(!(a&KChunkMask), MM::Panic(MM::EUnmapBadAlignment)); |
|
2282 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED) |
|
2283 remove_and_invalidate_section(::InitPageDirectory + pdeIndex, a, KERNEL_MAPPING); |
|
2284 #else |
|
2285 ::InitPageDirectory[pdeIndex]=0; |
|
2286 CacheMaintenance::SinglePteUpdated(TLinAddr(::InitPageDirectory + pdeIndex)); |
|
2287 InvalidateTLBForPage(a, KERNEL_MAPPING); // ASID irrelevant since global |
|
2288 #endif |
|
2289 a=next; |
|
2290 NKern::FlashSystem(); |
|
2291 continue; |
|
2292 } |
|
2293 TInt ptid=PageTableId(a,0); |
|
2294 if (ptid>=0) |

2295 { |

2296 SPageTableInfo& ptinfo=iPtInfo[ptid]; // only index iPtInfo once ptid is known to be valid |

2297 TPte* ppte=PageTable(ptid)+((a&KChunkMask)>>KPageShift); |
|
2298 TPte* ppte_End=ppte+to_do; |
|
2299 for (; ppte<ppte_End; ++ppte, a+=KPageSize) |
|
2300 { |
|
2301 if (*ppte & KArmV6PteSmallPage) |
|
2302 { |
|
2303 --ptinfo.iCount; |
|
2304 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED) |
|
2305 remove_and_invalidate_page(ppte, a, KERNEL_MAPPING); |
|
2306 #else |
|
2307 *ppte=0; |
|
2308 CacheMaintenance::SinglePteUpdated((TLinAddr)ppte); |
|
2309 InvalidateTLBForPage(a, KERNEL_MAPPING); |
|
2310 #endif |
|
2311 } |
|
2312 else if ((*ppte & KArmV6PteTypeMask) == KArmV6PteLargePage) |
|
2313 { |
|
2314 __ASSERT_DEBUG(!(a&KLargePageMask), MM::Panic(MM::EUnmapBadAlignment)); |
|
2315 ptinfo.iCount-=KLargeSmallPageRatio; |
|
2316 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED) |
|
2317 remove_and_invalidate_page(ppte, a, KERNEL_MAPPING); |
|
2318 #else |
|
2319 memclr(ppte, KLargeSmallPageRatio*sizeof(TPte)); |
|
2320 CacheMaintenance::MultiplePtesUpdated((TLinAddr)ppte, KLargeSmallPageRatio*sizeof(TPte)); |
|
2321 InvalidateTLBForPage(a, KERNEL_MAPPING); |
|
2322 #endif |
|
2323 a+=(KLargePageSize-KPageSize); |
|
2324 ppte+=(KLargeSmallPageRatio-1); |
|
2325 } |
|
2326 NKern::FlashSystem(); |
|
2327 } |
|
2328 } |
|
2329 else |
|
2330 a += (to_do<<KPageShift); |
|
2331 } |
|
2332 NKern::UnlockSystem(); |
|
2333 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED) |
|
2334 __FlushBtb(); |
|
2335 #endif |
|
2336 } |
|
2337 |
|
2338 |
|
2339 void ArmMmu::ClearPages(TInt aNumPages, TPhysAddr* aPageList, TUint8 aClearByte) |
|
2340 { |
|
2341 //map the pages at a temporary address, clear them and unmap |
|
2342 __ASSERT_MUTEX(RamAllocatorMutex); |
|
2343 while (--aNumPages >= 0) |
|
2344 { |
|
2345 TPhysAddr pa; |
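
// aPageList is a tagged pointer: if bit 0 is set it is really the physical |

// base of a contiguous run (advanced a page at a time below), otherwise it |

// points to a genuine array of page addresses. |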
|
2346 if((TInt)aPageList&1) |
|
2347 { |
|
2348 pa = (TPhysAddr)aPageList&~1; |
|
2349 *(TPhysAddr*)&aPageList += iPageSize; |
|
2350 } |
|
2351 else |
|
2352 pa = *aPageList++; |
|
2353 |
|
2354 *iTempPte = pa | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1); |
|
2355 CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte); |
|
2356 InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING); |
|
2357 memset((TAny*)iTempAddr, aClearByte, iPageSize); |
|
2358 // This temporary mapping is noncached => No need to flush cache here. |
|
2359 // Still, we have to make sure that write buffer(s) are drained. |
|
2360 CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)iTempAddr, iPageSize, EMapAttrBufferedC); |
|
2361 } |
|
2362 *iTempPte=0; |
|
2363 CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte); |
|
2364 InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING); |
|
2365 } |
|
2366 |
|
2367 |
|
2368 /** |
|
2369 Create a temporary mapping of one or more contiguous physical pages. |
|
2370 Fully cached memory attributes apply. |
|
2371 The RamAllocatorMutex must be held before this function is called and not released |
|
2372 until after UnmapTemp has been called. |
|
2373 |
|
2374 @param aPage The physical address of the pages to be mapped. |
|
2375 @param aLinAddr The linear address of any existing location where the page is mapped. |
|
2376 If the page isn't already mapped elsewhere as a cacheable page then |

2377 this value is irrelevant. (It is used for page colouring.) |
|
2378 @param aPages Number of pages to map. |
|
2379 |
|
2380 @return The linear address of where the pages have been mapped. |
|
2381 */ |
|
2382 TLinAddr ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages) |
|
2383 { |
|
2384 __ASSERT_MUTEX(RamAllocatorMutex); |
|
2385 __ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse)); |
|
2386 iTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask; |
|
2387 iTempMapCount = aPages; |
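
// The temporary mapping must have the same page colour (low bits of the page |

// number) as any existing cached mapping, so that on VIPT caches both map to |

// the same cache sets; iTempAddr has a slot for each of the KPageColourCount colours. |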
|
2388 if (aPages==1) |
|
2389 { |
|
2390 iTempPte[iTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1); |
|
2391 CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor)); |
|
2392 } |
|
2393 else |
|
2394 { |
|
2395 __ASSERT_DEBUG(iTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom)); |
|
2396 for (TInt i=0; i<aPages; i++) |
|
2397 iTempPte[iTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1); |
|
2398 CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iTempPte+iTempMapColor), aPages*sizeof(TPte)); |
|
2399 } |
|
2400 return iTempAddr+(iTempMapColor<<KPageShift); |
|
2401 } |
|
2402 |
|
2403 /** |
|
2404 Create a temporary mapping of one or more contiguous physical pages. |
|
2405 Memory attributes as specified by aMemType apply. |
|
2406 @see ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages) for other details. |
|
2407 */ |
|
2408 TLinAddr ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages, TMemoryType aMemType) |
|
2409 { |
|
2410 __ASSERT_MUTEX(RamAllocatorMutex); |
|
2411 __ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse)); |
|
2412 iTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask; |
|
2413 iTempMapCount = aPages; |
|
2414 TUint pte = SP_PTE(KArmV6PermRWNO, aMemType, 0, 1); |
|
2415 if (aPages==1) |
|
2416 { |
|
2417 iTempPte[iTempMapColor] = (aPage&~KPageMask) | pte; // pte already holds the full SP_PTE flags for aMemType |
|
2418 CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor)); |
|
2419 } |
|
2420 else |
|
2421 { |
|
2422 __ASSERT_DEBUG(iTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom)); |
|
2423 for (TInt i=0; i<aPages; i++) |
|
2424 iTempPte[iTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | pte; |
|
2425 CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iTempPte+iTempMapColor), aPages*sizeof(TPte)); |
|
2426 } |
|
2427 return iTempAddr+(iTempMapColor<<KPageShift); |
|
2428 } |
|
2429 |
|
2430 /** |
|
2431 Create a temporary mapping of one or more contiguous physical pages, distinct from |
|
2432 that created by MapTemp. |
|
2433 The RamAllocatorMutex must be held before this function is called and not released |
|
2434 until after UnmapSecondTemp has been called. |
|
2435 |
|
2436 @param aPage The physical address of the pages to be mapped. |
|
2437 @param aLinAddr The linear address of any existing location where the page is mapped. |
|
2438 If the page isn't already mapped elsewhere as a cacheable page then |

2439 this value is irrelevant. (It is used for page colouring.) |
|
2440 @param aPages Number of pages to map. |
|
2441 |
|
2442 @return The linear address of where the pages have been mapped. |
|
2443 */ |
|
2444 TLinAddr ArmMmu::MapSecondTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages) |
|
2445 { |
|
2446 __ASSERT_MUTEX(RamAllocatorMutex); |
|
2447 __ASSERT_DEBUG(!*iSecondTempPte,MM::Panic(MM::ETempMappingAlreadyInUse)); |
|
2448 iSecondTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask; |
|
2449 iSecondTempMapCount = aPages; |
|
2450 if (aPages==1) |
|
2451 { |
|
2452 iSecondTempPte[iSecondTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1); |
|
2453 CacheMaintenance::SinglePteUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor)); |
|
2454 } |
|
2455 else |
|
2456 { |
|
2457 __ASSERT_DEBUG(iSecondTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom)); |
|
2458 for (TInt i=0; i<aPages; i++) |
|
2459 iSecondTempPte[iSecondTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1); |
|
2460 CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor), aPages*sizeof(TPte)); |
|
2461 } |
|
2462 return iSecondTempAddr+(iSecondTempMapColor<<KPageShift); |
|
2463 } |
|
2464 |
|
2465 /** |
|
2466 Remove the temporary mapping created with MapTemp. |
|
2467 */ |
|
2468 void ArmMmu::UnmapTemp() |
|
2469 { |
|
2470 __ASSERT_MUTEX(RamAllocatorMutex); |
|
2471 for (TInt i=0; i<iTempMapCount; i++) |
|
2472 { |
|
2473 iTempPte[iTempMapColor+i] = 0; |
|
2474 CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor+i)); |
|
2475 InvalidateTLBForPage(iTempAddr+((iTempMapColor+i)<<KPageShift), KERNEL_MAPPING); |
|
2476 } |
|
2477 } |
|
2478 |
|
2479 /** |
|
2480 Remove the temporary mapping created with MapSecondTemp. |
|
2481 */ |
|
2482 void ArmMmu::UnmapSecondTemp() |
|
2483 { |
|
2484 __ASSERT_MUTEX(RamAllocatorMutex); |
|
2485 for (TInt i=0; i<iSecondTempMapCount; i++) |
|
2486 { |
|
2487 iSecondTempPte[iSecondTempMapColor+i] = 0; |
|
2488 CacheMaintenance::SinglePteUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor+i)); |
|
2489 InvalidateTLBForPage(iSecondTempAddr+((iSecondTempMapColor+i)<<KPageShift), KERNEL_MAPPING); |
|
2490 } |
|
2491 } |
|
2492 |
|
2493 |
|
2494 TBool ArmMmu::ValidateLocalIpcAddress(TLinAddr aAddr,TInt aSize,TBool aWrite) |
|
2495 { |
|
2496 __NK_ASSERT_DEBUG(aSize<=KChunkSize); |
|
2497 TLinAddr end = aAddr+aSize-1; |
|
2498 if(end<aAddr) |
|
2499 end = ~0u; |
|
2500 |
|
2501 if(TUint(aAddr^KIPCAlias)<TUint(KChunkSize) || TUint(end^KIPCAlias)<TUint(KChunkSize)) |
|
2502 { |
|
2503 // local address is in alias region. |
|
2504 // remove alias... |
|
2505 NKern::LockSystem(); |
|
2506 ((DMemModelThread*)TheCurrentThread)->RemoveAlias(); |
|
2507 NKern::UnlockSystem(); |
|
2508 // access memory, which will cause an exception... |
|
2509 if(!(TUint(aAddr^KIPCAlias)<TUint(KChunkSize))) |
|
2510 aAddr = end; |
|
2511 InvalidateTLBForPage(aAddr,((DMemModelProcess*)TheCurrentThread->iOwningProcess)->iOsAsid); |
|
2512 if(aWrite) |
|
2513 *(volatile TUint8*)aAddr = 0; |
|
2514 else |
|
2515 aWrite = *(volatile TUint8*)aAddr; |
|
2516 // can't get here |
|
2517 __NK_ASSERT_DEBUG(0); |
|
2518 } |
|
2519 |
|
2520 TUint32 local_mask; |
|
2521 DMemModelProcess* process=(DMemModelProcess*)TheCurrentThread->iOwningProcess; |
|
2522 if(aWrite) |
|
2523 local_mask = process->iAddressCheckMaskW; |
|
2524 else |
|
2525 local_mask = process->iAddressCheckMaskR; |
|
2526 TUint32 mask = 2<<(end>>27); |
|
2527 mask -= 1<<(aAddr>>27); |
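
// Each bit of the check mask covers one 128MB (1<<27) slice of the address |

// space: 2<<(end>>27) minus 1<<(aAddr>>27) sets exactly the bits for slices |

// aAddr>>27 through end>>27 (a range within a single slice k gives 1<<k). |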
|
2528 if((local_mask&mask)!=mask) |
|
2529 return EFalse; |
|
2530 |
|
2531 if(!aWrite) |
|
2532 return ETrue; // reads are ok |
|
2533 |
|
2534 // writes need further checking... |
|
2535 TLinAddr userCodeStart = iUserCodeBase; |
|
2536 TLinAddr userCodeEnd = userCodeStart+iMaxUserCodeSize; |
|
2537 if(end>=userCodeStart && aAddr<userCodeEnd) |
|
2538 return EFalse; // trying to write to user code area |
|
2539 |
|
2540 return ETrue; |
|
2541 } |
|
2542 |
|
2543 TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TInt aPerm, TLinAddr& aAliasAddr, TInt& aAliasSize) |
|
2544 // |
|
2545 // Set up an alias mapping starting at address aAddr in specified process. |
|
2546 // Check permissions aPerm. |
|
2547 // Enter and return with system locked. |
|
2548 // Note: The alias is removed if an exception is trapped by DThread::IpcExcHandler. |
|
2549 // |
|
2550 { |
|
2551 __KTRACE_OPT(KMMU2,Kern::Printf("Thread %O Alias %08x+%x Process %O perm %x",this,aAddr,aSize,aProcess,aPerm)); |
|
2552 __ASSERT_SYSTEM_LOCK |
|
2553 |
|
2554 if(TUint(aAddr^KIPCAlias)<TUint(KChunkSize)) |
|
2555 return KErrBadDescriptor; // prevent access to alias region |
|
2556 |
|
2557 ArmMmu& m=::TheMmu; |
|
2558 |
|
2559 // check if memory is in region which is safe to access with supervisor permissions... |
|
2560 TBool okForSupervisorAccess = aPerm&(EMapAttrReadSup|EMapAttrWriteSup) ? 1 : 0; |
|
2561 if(!okForSupervisorAccess) |
|
2562 { |
|
2563 TInt shift = aAddr>>27; |
|
2564 if(!(aPerm&EMapAttrWriteUser)) |
|
2565 { |
|
2566 // reading with user permissions... |
|
2567 okForSupervisorAccess = (aProcess->iAddressCheckMaskR>>shift)&1; |
|
2568 } |
|
2569 else |
|
2570 { |
|
2571 // writing with user permissions... |
|
2572 okForSupervisorAccess = (aProcess->iAddressCheckMaskW>>shift)&1; |
|
2573 if(okForSupervisorAccess) |
|
2574 { |
|
2575 // check for user code, because this is supervisor r/w and so |
|
2576 // is not safe to write to with supervisor permissions. |
|
2577 if(TUint(aAddr-m.iUserCodeBase)<TUint(m.iMaxUserCodeSize)) |
|
2578 return KErrBadDescriptor; // prevent write to this... |
|
2579 } |
|
2580 } |
|
2581 } |
|
2582 |
|
2583 TInt pdeIndex = aAddr>>KChunkShift; |
|
2584 if(pdeIndex>=(m.iLocalPdSize>>2)) |
|
2585 { |
|
2586 // address is in global section, don't bother aliasing it... |
|
2587 if(iAliasLinAddr) |
|
2588 RemoveAlias(); |
|
2589 aAliasAddr = aAddr; |
|
2590 TInt maxSize = KChunkSize-(aAddr&KChunkMask); |
|
2591 aAliasSize = aSize<maxSize ? aSize : maxSize; |
|
2592 __KTRACE_OPT(KMMU2,Kern::Printf("DMemModelThread::Alias() abandoned as memory is globally mapped")); |
|
2593 return okForSupervisorAccess; |
|
2594 } |
|
2595 |
|
2596 TInt asid = aProcess->iOsAsid; |
|
2597 TPde* pd = PageDirectory(asid); |
|
2598 TPde pde = pd[pdeIndex]; |
|
2599 if ((TPhysAddr)(pde&~KPageMask) == AliasRemapOld) |
|
2600 pde = AliasRemapNew|(pde&KPageMask); |
|
2601 pde = PDE_IN_DOMAIN(pde, KIPCAliasDomain); |
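
// The target chunk's PDE is re-tagged with the IPC alias domain before being |

// copied into this thread's alias slot; LockAlias/UnlockAlias can then revoke |

// or restore access to every alias at once (presumably via the domain access |

// control register) without editing page tables. |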
|
2602 TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask)); |
|
2603 if(pde==iAliasPde && iAliasLinAddr) |
|
2604 { |
|
2605 // pde already aliased, so just update linear address... |
|
2606 iAliasLinAddr = aliasAddr; |
|
2607 } |
|
2608 else |
|
2609 { |
|
2610 // alias PDE changed... |
|
2611 iAliasPde = pde; |
|
2612 iAliasOsAsid = asid; |
|
2613 if(!iAliasLinAddr) |
|
2614 { |
|
2615 ArmMmu::UnlockAlias(); |
|
2616 ::TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased |
|
2617 } |
|
2618 iAliasLinAddr = aliasAddr; |
|
2619 *iAliasPdePtr = pde; |
|
2620 CacheMaintenance::SinglePteUpdated((TLinAddr)iAliasPdePtr); |
|
2621 } |
|
2622 |
|
2623 __KTRACE_OPT(KMMU2,Kern::Printf("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr)); |
|
2624 InvalidateTLBForPage(aliasAddr, ((DMemModelProcess*)iOwningProcess)->iOsAsid); |
|
2625 TInt offset = aAddr&KPageMask; |
|
2626 aAliasAddr = aliasAddr | offset; |
|
2627 TInt maxSize = KPageSize - offset; |
|
2628 aAliasSize = aSize<maxSize ? aSize : maxSize; |
|
2629 iAliasTarget = aAddr & ~KPageMask; |
|
2630 return okForSupervisorAccess; |
|
2631 } |
|

void DMemModelThread::RemoveAlias()
//
// Remove alias mapping (if present)
// Enter and return with system locked.
//
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("Thread %O RemoveAlias", this));
    __ASSERT_SYSTEM_LOCK
    TLinAddr addr = iAliasLinAddr;
    if(addr)
        {
        ArmMmu::LockAlias();
        iAliasLinAddr = 0;
        iAliasPde = 0;
        *iAliasPdePtr = 0;
        CacheMaintenance::SinglePteUpdated((TLinAddr)iAliasPdePtr);
        InvalidateTLBForPage(addr, ((DMemModelProcess*)iOwningProcess)->iOsAsid);
        iAliasLink.Deque();
        }
    }
|

/*
 * Performs cache maintenance for a physical page that is about to be reused.
 * Fully cached attributes are assumed.
 */
void ArmMmu::CacheMaintenanceOnDecommit(TPhysAddr a)
    {
    // purge a single page from the cache following decommit
    ArmMmu& m=::TheMmu;
    TInt colour = SPageInfo::FromPhysAddr(a)->Offset()&KPageColourMask;
    TPte& pte=m.iTempPte[colour];
    TLinAddr va=m.iTempAddr+(colour<<KPageShift);
    pte=a|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
    CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);

    CacheMaintenance::PageToReuse(va,EMemAttNormalCached, a);

    pte=0;
    CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
    InvalidateTLBForPage(va,KERNEL_MAPPING);
    }

void ArmMmu::CacheMaintenanceOnDecommit(const TPhysAddr* al, TInt n)
    {
    // purge a list of pages from the cache following decommit
    while (--n>=0)
        ArmMmu::CacheMaintenanceOnDecommit(*al++);
    }
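
// Illustrative sketch (assumption, not in the original source): decommit
// paths would hand the freed frames to the list variant, e.g.
//
//     TPhysAddr pages[4];
//     /* ... fill 'pages' with the frames just unmapped ... */
//     ::TheMmu.CacheMaintenanceOnDecommit(pages, 4);
//
// Each page is briefly re-mapped at iTempAddr at its correct colour so that
// maintenance by virtual address hits the same cache lines as the old mapping.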
|

/*
 * Performs cache maintenance to preserve the contents of a physical page that is about to be reused.
 */
void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr a, TUint aMapAttr)
    {
    // clean a single page from the cache following decommit, preserving its contents
    ArmMmu& m=::TheMmu;
    TInt colour = SPageInfo::FromPhysAddr(a)->Offset()&KPageColourMask;
    TPte& pte=m.iTempPte[colour];
    TLinAddr va=m.iTempAddr+(colour<<KPageShift);
    pte=a|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
    CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);

    CacheMaintenance::MemoryToPreserveAndReuse(va, KPageSize, aMapAttr);

    pte=0;
    CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
    InvalidateTLBForPage(va,KERNEL_MAPPING);
    }

void ArmMmu::CacheMaintenanceOnPreserve(const TPhysAddr* al, TInt n, TUint aMapAttr)
    {
    // clean a list of pages from the cache following decommit, preserving their contents
    while (--n>=0)
        ArmMmu::CacheMaintenanceOnPreserve(*al++, aMapAttr);
    }
|

/*
 * Performs cache maintenance on physical memory that has been decommitted and has to be preserved.
 * Call this method for physical pages with no page info updated (or no page info at all).
 * @arg aPhysAddr The address of the contiguous physical memory to be preserved.
 * @arg aSize     The size of the region.
 * @arg aLinAddr  Former linear address of the region. As noted above, the physical memory
 *                is no longer mapped at this linear address.
 * @arg aMapAttr  Mapping attributes of the region when it was mapped at aLinAddr.
 * @pre MMU mutex is held.
 */
void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr aPhysAddr, TInt aSize, TLinAddr aLinAddr, TUint aMapAttr)
    {
    __NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);
    __NK_ASSERT_DEBUG((aSize&KPageMask)==0);
    __NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);

    TPhysAddr pa = aPhysAddr;
    TInt size = aSize;
    TInt colour = (aLinAddr>>KPageShift)&KPageColourMask;
    TPte* pte = &(iTempPte[colour]);
    while (size)
        {
        pte=&(iTempPte[colour]);
        TLinAddr va=iTempAddr+(colour<<KPageShift);
        *pte=pa|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
        CacheMaintenance::SinglePteUpdated((TLinAddr)pte);
        CacheMaintenance::MemoryToPreserveAndReuse(va, KPageSize, aMapAttr);

        *pte=0;
        CacheMaintenance::SinglePteUpdated((TLinAddr)pte);
        InvalidateTLBForPage(va,KERNEL_MAPPING);

        colour = (colour+1)&KPageColourMask;
        pa += KPageSize;
        size -= KPageSize;
        }
    }
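
// Worked example (illustrative): with KPageColourCount==4, preserving three
// pages formerly mapped at linear address 0x00405000 starts at colour
// (0x00405000>>12)&3 == 1, so the pages are temporarily mapped at
// iTempAddr+0x1000, iTempAddr+0x2000 and iTempAddr+0x3000 in turn, keeping
// each temporary mapping at the same VIPT cache colour as the old mapping.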
|

TInt ArmMmu::UnlockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
    {
    TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
    TInt page = aLinAddr>>KPageShift;
    NKern::LockSystem();
    for(;;)
        {
        TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
        TPte* pt = SafePageTableFromPde(*pd++);
        TInt pteIndex = page&(KChunkMask>>KPageShift);
        if(!pt)
            {
            // whole page table has gone, so skip all pages in it...
            TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
            aNumPages -= pagesInPt;
            page += pagesInPt;
            if(aNumPages>0)
                continue;
            NKern::UnlockSystem();
            return KErrNone;
            }
        pt += pteIndex;
        do
            {
            TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
            if(pagesInPt>aNumPages)
                pagesInPt = aNumPages;
            if(pagesInPt>KMaxPages)
                pagesInPt = KMaxPages;

            aNumPages -= pagesInPt;
            page += pagesInPt;

            do
                {
                TPte pte = *pt++;
                if(pte) // pte may be null if page has already been unlocked and reclaimed by system
                    iRamCache->DonateRamCachePage(SPageInfo::FromPhysAddr(pte));
                }
            while(--pagesInPt);

            if(!aNumPages)
                {
                NKern::UnlockSystem();
                return KErrNone;
                }

            pteIndex = page&(KChunkMask>>KPageShift);
            }
        while(!NKern::FlashSystem() && pteIndex);
        }
    }
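
// Illustrative pairing (assumption): LockRamCachePages()/UnlockRamCachePages()
// bracket a window in which pageable RAM-cache pages must stay resident, e.g.
//
//     if(m.LockRamCachePages(addr, n, process)==KErrNone)
//         {
//         /* ... pages are pinned, safe to access ... */
//         m.UnlockRamCachePages(addr, n, process);
//         }
//
// Unlock donates the pages back to the RAM cache, so the two calls must be
// kept strictly paired.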
|


TInt ArmMmu::LockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
    {
    TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
    TInt page = aLinAddr>>KPageShift;
    NKern::LockSystem();
    for(;;)
        {
        TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
        TPte* pt = SafePageTableFromPde(*pd++);
        TInt pteIndex = page&(KChunkMask>>KPageShift);
        if(!pt)
            goto not_found;
        pt += pteIndex;
        do
            {
            TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
            if(pagesInPt>aNumPages)
                pagesInPt = aNumPages;
            if(pagesInPt>KMaxPages)
                pagesInPt = KMaxPages;

            aNumPages -= pagesInPt;
            page += pagesInPt;

            do
                {
                TPte pte = *pt++;
                if(pte==0)
                    goto not_found;
                if(!iRamCache->ReclaimRamCachePage(SPageInfo::FromPhysAddr(pte)))
                    goto not_found;
                }
            while(--pagesInPt);

            if(!aNumPages)
                {
                NKern::UnlockSystem();
                return KErrNone;
                }

            pteIndex = page&(KChunkMask>>KPageShift);
            }
        while(!NKern::FlashSystem() && pteIndex);
        }
not_found:
    NKern::UnlockSystem();
    return KErrNotFound;
    }
|


void RamCache::SetFree(SPageInfo* aPageInfo)
    {
    ArmMmu& m=::TheMmu;
    // Make a page free
    SPageInfo::TType type = aPageInfo->Type();
    if(type==SPageInfo::EPagedCache)
        {
        TInt offset = aPageInfo->Offset()<<KPageShift;
        DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
        __NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
        TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
        TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
        TPte* pt = PtePtrFromLinAddr(lin,asid);
        TPhysAddr phys = (*pt)&~KPageMask;
        *pt = KPteNotPresentEntry;
        CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
        InvalidateTLBForPage(lin,asid);
        m.CacheMaintenanceOnDecommit(phys);

        // actually decommit it from chunk...
        TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
        SPageTableInfo& ptinfo=((ArmMmu*)iMmu)->iPtInfo[ptid];
        if(!--ptinfo.iCount)
            {
            chunk->iPageTables[offset>>KChunkShift] = 0xffff;
            NKern::UnlockSystem();
            ((ArmMmu*)iMmu)->DoUnassignPageTable(lin, (TAny*)asid);
            ((ArmMmu*)iMmu)->FreePageTable(ptid);
            NKern::LockSystem();
            }
        }
    else
        {
        __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
        Panic(EUnexpectedPageType);
        }
    }
|


//
// MemModelDemandPaging
//

class MemModelDemandPaging : public DemandPaging
    {
public:
    // From RamCacheBase
    virtual void Init2();
    virtual TInt Init3();
    virtual TBool PageUnmapped(SPageInfo* aPageInfo);
    // From DemandPaging
    virtual TInt Fault(TAny* aExceptionInfo);
    virtual void SetOld(SPageInfo* aPageInfo);
    virtual void SetFree(SPageInfo* aPageInfo);
    virtual void NotifyPageFree(TPhysAddr aPage);
    virtual TInt EnsurePagePresent(TLinAddr aPage, DProcess* aProcess);
    virtual TPhysAddr LinearToPhysical(TLinAddr aPage, DProcess* aProcess);
    virtual void AllocLoadAddress(DPagingRequest& aReq, TInt aDeviceId);
    virtual TInt PageState(TLinAddr aAddr);
    virtual TBool NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength);
    // New
    inline ArmMmu& Mmu() { return (ArmMmu&)*iMmu; }
    void InitRomPaging();
    void InitCodePaging();
    TInt HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TInt aAsid);
    TInt PageIn(TLinAddr aAddress, TInt aAsid, DMemModelCodeSegMemory* aCodeSegMemory);
public:
    // use of the following members is protected by the system lock...
    TPte* iPurgePte;     // PTE used for temporary mappings during cache purge operations
    TLinAddr iPurgeAddr; // address corresponding to iPurgePte
    };
|

extern void MakeGlobalPTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr);
extern void MakePTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr, TInt aAsid);

//
// MemModelDemandPaging
//


DemandPaging* DemandPaging::New()
    {
    return new MemModelDemandPaging();
    }


void MemModelDemandPaging::Init2()
    {
    __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">MemModelDemandPaging::Init2"));
    DemandPaging::Init2();

    iPurgeAddr = KDemandPagingTempAddr;
    iPurgePte = PtePtrFromLinAddr(iPurgeAddr);

    __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init2"));
    }
|


void MemModelDemandPaging::AllocLoadAddress(DPagingRequest& aReq, TInt aReqId)
    {
    aReq.iLoadAddr = iTempPages + aReqId * KPageSize * KPageColourCount;
    aReq.iLoadPte = PtePtrFromLinAddr(aReq.iLoadAddr);
    }


TInt MemModelDemandPaging::Init3()
    {
    TInt r=DemandPaging::Init3();
    if(r!=KErrNone)
        return r;

    // Create a region for mapping pages during page in
    DPlatChunkHw* chunk;
    TInt chunkSize = (KMaxPagingDevices * KPagingRequestsPerDevice + 1) * KPageColourCount * KPageSize;
    DPlatChunkHw::DoNew(chunk, KPhysAddrInvalid, chunkSize, EMapAttrSupRw|EMapAttrFullyBlocking);
    if(!chunk)
        Panic(EInitialiseFailed);
    TInt colourMask = KPageColourMask << KPageShift;
    iTempPages = (chunk->iLinAddr + colourMask) & ~colourMask;

    if(RomPagingRequested())
        InitRomPaging();

    if (CodePagingRequested())
        InitCodePaging();

    __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init3"));
    return KErrNone;
    }
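
// Worked example (illustrative, with hypothetical values): if
// KMaxPagingDevices were 2 and KPagingRequestsPerDevice were 2, with 4 page
// colours, chunkSize would be (2*2+1)*4*4096 = 80KB. The extra
// KPageColourCount*KPageSize allows iTempPages to be rounded up to a
// colour-aligned boundary, after which AllocLoadAddress() hands each request
// object its own colour-aligned window of KPageColourCount pages.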
|

void MemModelDemandPaging::InitRomPaging()
    {
    // Make page tables for demand paged part of ROM...
    __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("MemModelDemandPaging::Init3 making page tables for paged ROM"));
    TLinAddr lin = iRomPagedLinearBase&~KChunkMask; // first chunk containing paged ROM
    TLinAddr linEnd = iRomLinearBase+iRomSize;
    while(lin<linEnd)
        {
        // Get a Page Table
        TInt ptid = Mmu().PageTableId(lin,0);
        if(ptid<0)
            {
            MmuBase::Wait();
            ptid = Mmu().AllocPageTable();
            MmuBase::Signal();
            __NK_ASSERT_DEBUG(ptid>=0);
            Mmu().PtInfo(ptid).SetGlobal(lin >> KChunkShift);
            }

        // Get new page table addresses
        TPte* pt = PageTable(ptid);
        TPhysAddr ptPhys=Mmu().LinearToPhysical((TLinAddr)pt,0);

        // Pointer to page directory entry
        TPde* ppde = ::InitPageDirectory + (lin>>KChunkShift);

        // Fill in Page Table
        TPte* ptEnd = pt+(1<<(KChunkShift-KPageShift));
        pt += (lin&KChunkMask)>>KPageShift;
        TLinAddr firstPte = (TLinAddr)pt; // Will need this to clean page table memory region from cache

        do
            {
            if(lin<iRomPagedLinearBase)
                *pt++ = Mmu().LinearToPhysical(lin,0) | KRomPtePerm;
            else
                {
                MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, lin);
                ++pt;
                }
            lin += KPageSize;
            }
        while(pt<ptEnd && lin<=linEnd);

        CacheMaintenance::MultiplePtesUpdated((TLinAddr)firstPte, (TUint)pt-firstPte);

        // Add new Page Table to the Page Directory
        TPde newpde = ptPhys | KShadowPdePerm;
        __KTRACE_OPT2(KPAGING,KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
        TInt irq=NKern::DisableAllInterrupts();
        *ppde = newpde;
        CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
        FlushTLBs();
        NKern::RestoreInterrupts(irq);
        }
    }
|


void MemModelDemandPaging::InitCodePaging()
    {
    // Initialise code paging info
    iCodeLinearBase = Mmu().iUserCodeBase;
    iCodeSize = Mmu().iMaxUserCodeSize;
    }


/**
@return ETrue when the unmapped page should be freed, EFalse otherwise
*/
TBool MemModelDemandPaging::PageUnmapped(SPageInfo* aPageInfo)
    {
    SPageInfo::TType type = aPageInfo->Type();

    // Only have to deal with cache pages - pages containing code don't get returned to the system
    // when they are decommitted from an individual process, only when the code segment is destroyed
    if(type!=SPageInfo::EPagedCache)
        {
        __NK_ASSERT_DEBUG(type!=SPageInfo::EPagedCode); // shouldn't happen
        __NK_ASSERT_DEBUG(type!=SPageInfo::EPagedData); // not supported yet
        return ETrue;
        }

    RemovePage(aPageInfo);
    AddAsFreePage(aPageInfo);
    // Return false to stop DMemModelChunk::DoDecommit from freeing this page
    return EFalse;
    }
|


void DoSetCodeOld(SPageInfo* aPageInfo, DMemModelCodeSegMemory* aCodeSegMemory, TLinAddr aLinAddr)
    {
    NThread* currentThread = NKern::CurrentThread();
    aPageInfo->SetModifier(currentThread);
    // scan all address spaces...
    TInt asid = -1;
    TInt lastAsid = KArmV6NumAsids-1;
    TUint32* ptr = aCodeSegMemory->iOsAsids->iMap;
    do
        {
        TUint32 bits = *ptr++;
        do
            {
            ++asid;
            if(bits&0x80000000u)
                {
                // codeseg is mapped in this address space, so update PTE...
                TPte* pt = PtePtrFromLinAddr(aLinAddr,asid);
                TPte pte = *pt;
                if(pte&KPtePresentMask)
                    {
                    __NK_ASSERT_DEBUG((pte&~KPageMask) == aPageInfo->PhysAddr());
                    MakePTEInaccessible(pt, pte&~KPtePresentMask, aLinAddr, asid);
                    }
                }
            }
        while(bits<<=1);
        if(NKern::FlashSystem() && aPageInfo->CheckModified(currentThread))
            return; // page was modified by another thread
        asid |= 31;
        }
    while(asid<lastAsid);
    }
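
// Worked example (illustrative): each word of iOsAsids->iMap covers 32 ASIDs,
// most significant bit first. If the first word is 0x80000001, the inner loop
// updates the PTE for asid 0, passes over asids 1..30 (bits clear), updates
// asid 31, then exits when 'bits<<=1' reaches zero; 'asid |= 31' snaps the
// counter to the end of the word so the next word starts cleanly at asid 32.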
|


void MemModelDemandPaging::SetOld(SPageInfo* aPageInfo)
    {
    __ASSERT_SYSTEM_LOCK;
    __NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedOld);

    SPageInfo::TType type = aPageInfo->Type();

    if(type==SPageInfo::EPagedROM)
        {
        // get linear address of page...
        TInt offset = aPageInfo->Offset()<<KPageShift;
        __NK_ASSERT_DEBUG(TUint(offset)<iRomSize);

        // make page inaccessible...
        TLinAddr lin = iRomLinearBase+offset;
        TPte* pt = PtePtrFromLinAddr(lin);
        MakeGlobalPTEInaccessible(pt, *pt&~KPtePresentMask, lin);
        }
    else if(type==SPageInfo::EPagedCode)
        {
        START_PAGING_BENCHMARK;

        // get linear address of page...
        TInt offset = aPageInfo->Offset()<<KPageShift;
        __NK_ASSERT_DEBUG(TUint(offset)<iCodeSize);
        TLinAddr lin = iCodeLinearBase+offset;

        // get CodeSegMemory...
        DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)aPageInfo->Owner();
        __NK_ASSERT_DEBUG(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iIsDemandPaged);

#ifdef _DEBUG
        TInt pageNumber = (lin - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
        __NK_ASSERT_DEBUG(codeSegMemory->iPages[pageNumber] == aPageInfo->PhysAddr());
#endif

        // make page inaccessible...
        DoSetCodeOld(aPageInfo,codeSegMemory,lin);

        END_PAGING_BENCHMARK(this, EPagingBmSetCodePageOld);
        }
    else if(type==SPageInfo::EPagedCache)
        {
        // leave page accessible
        }
    else if(type!=SPageInfo::EPagedFree)
        {
        __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetOld() with bad page type = %d",aPageInfo->Type()));
        Panic(EUnexpectedPageType);
        }
    NKern::FlashSystem();
    }
|


void DoSetCodeFree(SPageInfo* aPageInfo, TPhysAddr aPhysAddr, DMemModelCodeSegMemory* aCodeSegMemory, TLinAddr aLinAddr)
    {
    NThread* currentThread = NKern::CurrentThread();
    aPageInfo->SetModifier(currentThread);
    // scan all address spaces...
    TInt asid = -1;
    TInt lastAsid = KArmV6NumAsids-1;
    TUint32* ptr = aCodeSegMemory->iOsAsids->iMap;
    do
        {
        TUint32 bits = *ptr++;
        do
            {
            ++asid;
            if(bits&0x80000000u)
                {
                // codeseg is mapped in this address space, so update PTE...
                TPte* pt = PtePtrFromLinAddr(aLinAddr,asid);
                TPte pte = *pt;
                if (pte!=KPteNotPresentEntry && (pte&~KPageMask) == aPhysAddr)
                    MakePTEInaccessible(pt, KPteNotPresentEntry, aLinAddr, asid);
                }
            }
        while(bits<<=1);
        if(NKern::FlashSystem())
            {
            // nobody else should modify page!
            __NK_ASSERT_DEBUG(!aPageInfo->CheckModified(currentThread));
            }
        asid |= 31;
        }
    while(asid<lastAsid);
    }
|


void MemModelDemandPaging::SetFree(SPageInfo* aPageInfo)
    {
    __ASSERT_SYSTEM_LOCK;
    __ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
    __NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedDead);
    if(aPageInfo->LockCount())
        Panic(ERamPageLocked);

    SPageInfo::TType type = aPageInfo->Type();
    TPhysAddr phys = aPageInfo->PhysAddr();

    if(type==SPageInfo::EPagedROM)
        {
        // get linear address of page...
        TInt offset = aPageInfo->Offset()<<KPageShift;
        __NK_ASSERT_DEBUG(TUint(offset)<iRomSize);
        TLinAddr lin = iRomLinearBase+offset;

        // unmap it...
        TPte* pt = PtePtrFromLinAddr(lin);
        MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, lin);

#ifdef BTRACE_PAGING
        BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutROM,phys,lin);
#endif
        }
    else if(type==SPageInfo::EPagedCode)
        {
        START_PAGING_BENCHMARK;

        // get linear address of page...
        TInt offset = aPageInfo->Offset()<<KPageShift;
        __NK_ASSERT_DEBUG(TUint(offset)<iCodeSize);
        TLinAddr lin = iCodeLinearBase+offset;

        // get CodeSegMemory...
        // NOTE, this cannot die because we hold the RamAlloc mutex, and the CodeSegMemory
        // destructor also needs this mutex to do its cleanup...
        DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)aPageInfo->Owner();
        __NK_ASSERT_DEBUG(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iIsDemandPaged);

        // remove page from CodeSegMemory (must come before System Lock is released)...
        TInt pageNumber = (lin - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
        __NK_ASSERT_DEBUG(codeSegMemory->iPages[pageNumber] == aPageInfo->PhysAddr());
        codeSegMemory->iPages[pageNumber] = KPhysAddrInvalid;

        // unmap page from all processes it's mapped into...
        DoSetCodeFree(aPageInfo,phys,codeSegMemory,lin);

        END_PAGING_BENCHMARK(this, EPagingBmSetCodePageFree);
#ifdef BTRACE_PAGING
        BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCode,phys,lin);
#endif
        }
    else if(type==SPageInfo::EPagedCache)
        {
        // get linear address of page...
        TInt offset = aPageInfo->Offset()<<KPageShift;
        DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
        __NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
        TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;

        // unmap it...
        TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
        TPte* pt = PtePtrFromLinAddr(lin,asid);
        *pt = KPteNotPresentEntry;
        CacheMaintenance::SinglePteUpdated((TLinAddr)pt);

        InvalidateTLBForPage(lin,asid);

        // actually decommit it from chunk...
        TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
        SPageTableInfo& ptinfo=Mmu().iPtInfo[ptid];
        if(!--ptinfo.iCount)
            {
            chunk->iPageTables[offset>>KChunkShift] = 0xffff;
            NKern::UnlockSystem();
            Mmu().DoUnassignPageTable(lin, (TAny*)asid);
            Mmu().FreePageTable(ptid);
            NKern::LockSystem();
            }

#ifdef BTRACE_PAGING
        BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCache,phys,lin);
#endif
        }
    else if(type==SPageInfo::EPagedFree)
        {
        // already free...
#ifdef BTRACE_PAGING
        BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOutFree,phys);
#endif
        // fall through to cache purge code because cache may not have been
        // cleaned for this page if PageUnmapped called
        }
    else
        {
        __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
        Panic(EUnexpectedPageType);
        return;
        }

    NKern::UnlockSystem();

    // purge cache for page...
    TInt colour = aPageInfo->Offset()&KPageColourMask;
    TPte& pte=iPurgePte[colour];
    TLinAddr va=iPurgeAddr+(colour<<KPageShift);
    pte=phys|SP_PTE(KArmV6PermRWNO, TheMmu.iCacheMaintenanceTempMapAttr, 1, 1);
    CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);

    CacheMaintenance::PageToReuse(va,EMemAttNormalCached, KPhysAddrInvalid);

    pte=0;
    CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
    InvalidateTLBForPage(va,KERNEL_MAPPING);

    NKern::LockSystem();
    }
|


void MemModelDemandPaging::NotifyPageFree(TPhysAddr aPage)
    {
    __KTRACE_OPT(KPAGING, Kern::Printf("MemModelDemandPaging::NotifyPageFree %08x", aPage));
    __ASSERT_SYSTEM_LOCK;

    SPageInfo* pageInfo = SPageInfo::FromPhysAddr(aPage);
    __ASSERT_DEBUG(pageInfo->Type()==SPageInfo::EPagedCode, MM::Panic(MM::EUnexpectedPageType));
    RemovePage(pageInfo);
    SetFree(pageInfo);
    AddAsFreePage(pageInfo);
    }
|


TInt MemModelDemandPaging::Fault(TAny* aExceptionInfo)
    {
    TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;

    // Get faulting address
    TLinAddr faultAddress = exc.iFaultAddress;
    if(exc.iExcCode==EArmExceptionDataAbort)
        {
        // Let writes take an exception rather than page in any memory...
        if(exc.iFaultStatus&(1<<11))
            return KErrUnknown;
        }
    else if (exc.iExcCode != EArmExceptionPrefetchAbort)
        return KErrUnknown; // Not prefetch or data abort

    // Only handle page translation faults
    if((exc.iFaultStatus & 0x40f) != 0x7)
        return KErrUnknown;

    DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;

    // check which region the fault occurred in...
    TInt asid = 0; // asid != 0 => code paging fault
    if(TUint(faultAddress-iRomPagedLinearBase)<iRomPagedSize)
        {
        // in ROM
        }
    else if(TUint(faultAddress-iCodeLinearBase)<iCodeSize)
        {
        // in code
        asid = ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid;
        }
    else if (thread->iAliasLinAddr && TUint(faultAddress - thread->iAliasLinAddr) < TUint(KPageSize))
        {
        // in aliased memory
        faultAddress = (faultAddress - thread->iAliasLinAddr) + thread->iAliasTarget;
        if(TUint(faultAddress-iCodeLinearBase)>=iCodeSize)
            return KErrUnknown; // not in alias of code
        asid = thread->iAliasOsAsid;
        __NK_ASSERT_DEBUG(asid != 0);
        }
    else
        return KErrUnknown; // Not in pageable region

    // Check if thread holds fast mutex and claim system lock
    NFastMutex* fm = NKern::HeldFastMutex();
    TPagingExcTrap* trap = thread->iPagingExcTrap;
    if(!fm)
        NKern::LockSystem();
    else
        {
        if(!trap || fm!=&TheScheduler.iLock)
            {
            __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
            Panic(EPageFaultWhilstFMHeld); // Not allowed to hold mutexes
            }
        // restore address space on multiple memory model (because the trap will
        // bypass any code which would have done this.)...
        DMemModelThread::RestoreAddressSpace();

        // Current thread already has the system lock...
        NKern::FlashSystem(); // Let someone else have a go with the system lock.
        }

    // System locked here

    TInt r = KErrNone;
    if(thread->IsRealtime())
        r = CheckRealtimeThreadFault(thread, aExceptionInfo);
    if (r == KErrNone)
        r = HandleFault(exc, faultAddress, asid);

    // Restore system lock state
    if (fm != NKern::HeldFastMutex())
        {
        if (fm)
            NKern::LockSystem();
        else
            NKern::UnlockSystem();
        }

    // Deal with XTRAP_PAGING
    if(r == KErrNone && trap)
        {
        trap->Exception(1); // Return from exception trap with result '1' (value>0)
        // code doesn't continue beyond this point.
        }

    return r;
    }
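
// Illustrative decode (assumption about the ARMv6 FSR layout used above): the
// fault status register keeps its fault type in bits 0-3 plus bit 10, so
// 'iFaultStatus & 0x40f' isolates it; 0x7 is a translation fault on a page,
// the only kind demand paging services. Bit 11 distinguishes a write (1) from
// a read (0), which is why write aborts are rejected before this check.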
|



TInt MemModelDemandPaging::HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TInt aAsid)
    {
    ++iEventInfo.iPageFaultCount;

    // get page table entry...
    TPte* pt = SafePtePtrFromLinAddr(aFaultAddress, aAsid);
    if(!pt)
        return KErrNotFound;
    TPte pte = *pt;

    // Do what is required to make page accessible...

    if(pte&KPtePresentMask)
        {
        // PTE is present, so assume it has already been dealt with
#ifdef BTRACE_PAGING
        BTraceContext12(BTrace::EPaging,BTrace::EPagingPageNop,pte&~KPageMask,aFaultAddress,aExc.iR15);
#endif
        return KErrNone;
        }

    if(pte!=KPteNotPresentEntry)
        {
        // PTE already has a page
        SPageInfo* pageInfo = SPageInfo::FromPhysAddr(pte);
        if(pageInfo->State()==SPageInfo::EStatePagedDead)
            {
            // page currently being unmapped, so do that here...
            MakePTEInaccessible(pt, KPteNotPresentEntry, aFaultAddress, aAsid);
            }
        else
            {
            // page just needs making young again...
            *pt = TPte(pte|KArmV6PteSmallPage); // Update page table
            CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
            Rejuvenate(pageInfo);
#ifdef BTRACE_PAGING
            BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,pte&~KPageMask,aFaultAddress,aExc.iR15);
#endif
            return KErrNone;
            }
        }

    // PTE not present, so page it in...
    // check if fault in a CodeSeg...
    DMemModelCodeSegMemory* codeSegMemory = NULL;
    if (!aAsid)
        NKern::ThreadEnterCS();
    else
        {
        // find CodeSeg...
        DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aFaultAddress);
        if (!codeSeg)
            return KErrNotFound;
        codeSegMemory = codeSeg->Memory();
        if (codeSegMemory==0 || !codeSegMemory->iIsDemandPaged || codeSegMemory->iOsAsids->NotFree(aAsid, 1))
            return KErrNotFound;

        // check if it's paged in but not yet mapped into this process...
        TInt pageNumber = (aFaultAddress - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
        TPhysAddr page = codeSegMemory->iPages[pageNumber];
        if (page != KPhysAddrInvalid)
            {
            // map it into this process...
            SPageInfo* pageInfo = SPageInfo::FromPhysAddr(page);
            __NK_ASSERT_DEBUG(pageInfo->State()!=SPageInfo::EStatePagedDead);
            *pt = page | (codeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte);
            CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
            Rejuvenate(pageInfo);
#ifdef BTRACE_PAGING
            BTraceContext8(BTrace::EPaging,BTrace::EPagingMapCode,page,aFaultAddress);
#endif
            return KErrNone;
            }

        // open reference on CodeSegMemory
        NKern::ThreadEnterCS();
#ifdef _DEBUG
        TInt r =
#endif
        codeSegMemory->Open();
        __NK_ASSERT_DEBUG(r==KErrNone);
        NKern::FlashSystem();
        }

#ifdef BTRACE_PAGING
    BTraceContext8(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aExc.iR15);
#endif
    TInt r = PageIn(aFaultAddress, aAsid, codeSegMemory);

    NKern::UnlockSystem();

    if(codeSegMemory)
        codeSegMemory->Close();

    NKern::ThreadLeaveCS();

    return r;
    }
|


TInt MemModelDemandPaging::PageIn(TLinAddr aAddress, TInt aAsid, DMemModelCodeSegMemory* aCodeSegMemory)
    {
    // Get a request object - this may block until one is available
    DPagingRequest* req = AcquireRequestObject();

    // Get page table entry
    TPte* pt = SafePtePtrFromLinAddr(aAddress, aAsid);

    // Check page is still required...
    if(!pt || *pt!=KPteNotPresentEntry)
        {
#ifdef BTRACE_PAGING
        BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
#endif
        ReleaseRequestObject(req);
        return pt ? KErrNone : KErrNotFound;
        }

    ++iEventInfo.iPageInReadCount;

    // Get a free page
    SPageInfo* pageInfo = AllocateNewPage();
    __NK_ASSERT_DEBUG(pageInfo);

    // Get physical address of free page
    TPhysAddr phys = pageInfo->PhysAddr();
    __NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid);

    // Temporarily map free page
    TInt colour = (aAddress>>KPageShift)&KPageColourMask;
    __NK_ASSERT_DEBUG((req->iLoadAddr & (KPageColourMask << KPageShift)) == 0);
    req->iLoadAddr |= colour << KPageShift;
    TLinAddr loadAddr = req->iLoadAddr;
    pt = req->iLoadPte+colour;
//  *pt = phys | SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTWAWTWA, 0, 1);
    *pt = phys | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
    CacheMaintenance::SinglePteUpdated((TLinAddr)pt);

    // Read page from backing store
    aAddress &= ~KPageMask;
    NKern::UnlockSystem();

    TInt r;
    if (!aCodeSegMemory)
        r = ReadRomPage(req, aAddress);
    else
        {
        r = ReadCodePage(req, aCodeSegMemory, aAddress);
        if (r == KErrNone)
            aCodeSegMemory->ApplyCodeFixups((TUint32*)loadAddr, aAddress);
        }
    if(r!=KErrNone)
        Panic(EPageInFailed);

    // make caches consistent...
//  Cache::IMB_Range(loadAddr, KPageSize);
    *pt = phys | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
    CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
    InvalidateTLBForPage(loadAddr,KERNEL_MAPPING);
    CacheMaintenance::CodeChanged(loadAddr, KPageSize, CacheMaintenance::ECPUUncached);

    NKern::LockSystem();

    // Invalidate temporary mapping
    MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, loadAddr);

    // Release request object now we're finished with it
    req->iLoadAddr &= ~(KPageColourMask << KPageShift);
    ReleaseRequestObject(req);

    // Get page table entry
    pt = SafePtePtrFromLinAddr(aAddress, aAsid);

    // Check page still needs updating
    TBool notNeeded = pt==0 || *pt!=KPteNotPresentEntry;
    if(aCodeSegMemory)
        notNeeded |= aCodeSegMemory->iOsAsids->NotFree(aAsid, 1);
    if(notNeeded)
        {
        // We don't need the new page after all, so put it on the active list as a free page
        __KTRACE_OPT(KPAGING,Kern::Printf("DP: PageIn (New page not used)"));
#ifdef BTRACE_PAGING
        BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
#endif
        AddAsFreePage(pageInfo);
        return pt ? KErrNone : KErrNotFound;
        }

    // Update page info
    if (!aCodeSegMemory)
        pageInfo->SetPagedROM((aAddress-iRomLinearBase)>>KPageShift);
    else
        {
        // Check if page has been paged in and mapped into another process while we were waiting
        TInt pageNumber = (aAddress - aCodeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
        TPhysAddr page = aCodeSegMemory->iPages[pageNumber];
        if (page != KPhysAddrInvalid)
            {
            // don't need page we've just paged in...
            AddAsFreePage(pageInfo);

            // map existing page into this process...
            pageInfo = SPageInfo::FromPhysAddr(page);
            __NK_ASSERT_DEBUG(pageInfo->State()!=SPageInfo::EStatePagedDead);
            *pt = page | (aCodeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte);
            CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
#ifdef BTRACE_PAGING
            BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
#endif
            Rejuvenate(pageInfo);
            return KErrNone;
            }
        aCodeSegMemory->iPages[pageNumber] = phys;

        pageInfo->SetPagedCode(aCodeSegMemory,(aAddress-Mmu().iUserCodeBase)>>KPageShift);
        }

    // Map page into final location
    *pt = phys | (aCodeSegMemory ? (aCodeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte) : KRomPtePerm);
    CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
#ifdef BTRACE_PAGING
    TInt subCat = aCodeSegMemory ? BTrace::EPagingPageInCode : BTrace::EPagingPageInROM;
    BTraceContext8(BTrace::EPaging,subCat,phys,aAddress);
#endif

    AddAsYoungest(pageInfo);
    BalanceAges();

    return KErrNone;
    }
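
// Design note (illustrative): the load window in the request object spans
// KPageColourCount pages so the temporary mapping can be placed at the same
// colour as the final address; e.g. a fault at 0x70003000 has colour 3 when
// KPageColourMask==3, so the page is loaded through req->iLoadAddr+0x3000.
// This keeps the VIPT caches coherent between the load-time and run-time
// mappings without extra maintenance.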
|


inline TUint8 ReadByte(TLinAddr aAddress)
    { return *(volatile TUint8*)aAddress; }


TInt MemModelDemandPaging::EnsurePagePresent(TLinAddr aPage, DProcess* aProcess)
    {
    TInt r = KErrBadDescriptor;
    XTRAPD(exc,XT_DEFAULT,
        if (!aProcess)
            {
            XTRAP_PAGING_RETRY(CHECK_PAGING_SAFE; ReadByte(aPage););
            r = KErrNone;
            }
        else
            {
            DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
retry:
            TInt pagingFault;
            XTRAP_PAGING_START(pagingFault);
            CHECK_PAGING_SAFE;
            // make alias of page in this process
            TLinAddr alias_src;
            TInt alias_size;
            TInt aliasResult = t.Alias(aPage, (DMemModelProcess*)aProcess, 1, EMapAttrReadUser, alias_src, alias_size);
            if (aliasResult>=0)
                {
                // ensure page to be locked is mapped in, by reading from it...
                ReadByte(alias_src);
                r = KErrNone;
                }
            XTRAP_PAGING_END;
            t.RemoveAlias();
            if(pagingFault>0)
                goto retry;
            }
        ); // end of XTRAPD
    if(exc)
        return KErrBadDescriptor;
    return r;
    }
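
// Usage note (illustrative): XTRAP_PAGING_START/XTRAP_PAGING_END is the retry
// bracket for code that may take a demand-paging fault while holding the
// system lock. Judging from the flow above, 'pagingFault' ends up greater
// than zero when such a fault was trapped, in which case the alias is dropped
// and the whole sequence is retried from scratch.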
|


TPhysAddr MemModelDemandPaging::LinearToPhysical(TLinAddr aPage, DProcess* aProcess)
    {
    TInt asid = 0;
    if (aProcess)
        asid = ((DMemModelProcess*)aProcess)->iOsAsid;
    return Mmu().LinearToPhysical(aPage, asid);
    }
|


TInt MemModelDemandPaging::PageState(TLinAddr aAddr)
    {
    DMemModelProcess* process = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
    TInt asid = 0;
    TPte* ptePtr = 0;
    TPte pte = 0;
    TInt r = 0;
    SPageInfo* pageInfo = NULL;

    NKern::LockSystem();

    DMemModelCodeSegMemory* codeSegMemory = 0;
    if(TUint(aAddr-iRomPagedLinearBase)<iRomPagedSize)
        r |= EPageStateInRom;
    else if (TUint(aAddr-iCodeLinearBase)<iCodeSize)
        {
        DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aAddr);
        if(codeSeg)
            codeSegMemory = codeSeg->Memory();
        asid = process->iOsAsid;
        if (codeSegMemory && codeSegMemory->iOsAsids->NotAllocated(asid, 1))
            {
            r |= EPageStateInRamCode;
            if (codeSegMemory->iIsDemandPaged)
                r |= EPageStatePaged;
            }
        if(process->iCodeChunk)
            r |= EPageStateCodeChunkPresent;
        }

    ptePtr = SafePtePtrFromLinAddr(aAddr,asid);
    if (!ptePtr)
        goto done;
    r |= EPageStatePageTablePresent;
    pte = *ptePtr;
    if (pte == KPteNotPresentEntry)
        goto done;
    r |= EPageStatePtePresent;
    if (pte & KPtePresentMask)
        r |= EPageStatePteValid;

    pageInfo = SPageInfo::FromPhysAddr(pte);
    r |= pageInfo->Type();
    r |= pageInfo->State()<<8;

    if (codeSegMemory && codeSegMemory->iPages)
        {
        TPhysAddr phys = pte & ~KPageMask;
        TInt pageNumber = (aAddr - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
        if (codeSegMemory->iPages[pageNumber] == phys)
            r |= EPageStatePhysAddrPresent;
        }

done:
    NKern::UnlockSystem();
    return r;
    }
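
// Illustrative decode (assumption): the returned value packs the page's
// SPageInfo::TType in the low byte and SPageInfo::TState in the second byte,
// with the EPageState* flags above those, so a caller can test individual
// facts, e.g.
//
//     TInt state = pager->PageState(addr);
//     if(state & EPageStatePtePresent)
//         { /* a PTE exists for addr */ }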
|


TBool MemModelDemandPaging::NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength)
    {
    // Don't check mutex order for reads from the global area, except for the paged part of ROM
    TBool rangeInGlobalArea = aStartAddr >= KRomLinearBase;
    TBool rangeInPagedRom = iRomPagedLinearBase != 0 && aStartAddr < (iRomLinearBase + iRomSize) && (aStartAddr + aLength) > iRomPagedLinearBase;
    return !rangeInGlobalArea || rangeInPagedRom;
    }


EXPORT_C TBool DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
    {
    MemModelDemandPaging* pager = (MemModelDemandPaging*)iThePager;
    if(pager)
        {
        ArmMmu& m = pager->Mmu();
        TLinAddr end = aStart+aSize;

        if ((aStart < TUint(pager->iRomPagedLinearBase+pager->iRomPagedSize) && end > pager->iRomPagedLinearBase) ||
            (aStart < TUint(m.iUserCodeBase + m.iMaxUserCodeSize) && end > m.iUserCodeBase))
            return pager->ReserveLock(aThread,aStart,aSize,*this);
        }
    return EFalse;
    }
|

void ArmMmu::DisablePageModification(DMemModelChunk* aChunk, TInt aOffset)
//
// Mark the page at aOffset in aChunk read-only to prevent it being
// modified while defrag is in progress. Save the required information
// to allow the fault handler to deal with this.
// Call this with the system unlocked.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DisablePageModification() offset=%08x", aOffset));

    TInt ptid = aChunk->iPageTables[aOffset>>KChunkShift];
    if(ptid == 0xffff)
        Panic(EDefragDisablePageFailed);

    NKern::LockSystem();
    TPte* pPte = PageTable(ptid) + ((aOffset&KChunkMask)>>KPageShift);
    TPte pte = *pPte;
    if ((pte & KArmV6PteSmallPage) != KArmV6PteSmallPage
            || SP_PTE_PERM_GET(pte) != (TUint)KArmV6PermRWRW)
        Panic(EDefragDisablePageFailed);

    iDisabledAddr = (TLinAddr)(aChunk->iBase) + aOffset;
    if (aChunk->iOwningProcess)
        iDisabledAddrAsid = ((DMemModelProcess*)(aChunk->iOwningProcess))->iOsAsid;
    else
        iDisabledAddrAsid = iDisabledAddr<KRomLinearBase ? UNKNOWN_MAPPING : KERNEL_MAPPING;
    iDisabledPte = pPte;
    iDisabledOldVal = pte;

    *pPte = SP_PTE_PERM_SET(pte, KArmV6PermRORO);
    CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
    InvalidateTLBForPage(iDisabledAddr, iDisabledAddrAsid);
    NKern::UnlockSystem();
    }
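
// Illustrative flow (assumption): while defrag copies the page, any thread
// that writes to it takes a permission fault which lands in
// ArmMmu::RamDefragFault() below; the saved iDisabledPte/iDisabledOldVal pair
// lets that handler restore write access and retry the faulting instruction
// once the defrag has finished with the page.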
|

TInt ArmMmu::RamDefragFault(TAny* aExceptionInfo)
    {
    TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;

    // Get faulting address
    TLinAddr faultAddress;
    if(exc.iExcCode==EArmExceptionDataAbort)
        {
        faultAddress = exc.iFaultAddress;
        // Defrag can only cause writes to fault on the multiple memory model
        if(!(exc.iFaultStatus&(1<<11)))
            return KErrUnknown;
        }
    else
        return KErrUnknown; // Not data abort

    // Only handle page permission faults
    if((exc.iFaultStatus & 0x40f) != 0xf)
        return KErrUnknown;

    DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
    TInt asid = ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid;

    TBool aliased = EFalse;
    if (thread->iAliasLinAddr && TUint(faultAddress - thread->iAliasLinAddr) < TUint(KPageSize))
        {
        // in aliased memory
        aliased = ETrue;
        faultAddress = (faultAddress - thread->iAliasLinAddr) + thread->iAliasTarget;
        asid = thread->iAliasOsAsid;
        __NK_ASSERT_DEBUG(asid != 0);
        }

    // Take system lock if not already held
    NFastMutex* fm = NKern::HeldFastMutex();
    if(!fm)
        NKern::LockSystem();
    else if(fm!=&TheScheduler.iLock)
        {
        __KTRACE_OPT2(KMMU,KPANIC,Kern::Printf("Defrag: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
        Panic(EDefragFaultWhilstFMHeld); // Not allowed to hold mutexes
        }

    TInt r = KErrUnknown;

    // check if write access to the page has already been restored and retry if so
    TPte* pt = SafePtePtrFromLinAddr(faultAddress, asid);
    if(!pt)
        {
        r = KErrNotFound;
        goto leave;
        }
    if (SP_PTE_PERM_GET(*pt) == (TUint)KArmV6PermRWRW)
        {
        r = KErrNone;
        goto leave;
        }

    // check if the fault occurred in the page we are moving
    if (   iDisabledPte
        && TUint(faultAddress - iDisabledAddr) < TUint(KPageSize)
        && (iDisabledAddrAsid < 0 || asid == iDisabledAddrAsid) )
        {
        // restore access to the page
        *iDisabledPte = iDisabledOldVal;
        CacheMaintenance::SinglePteUpdated((TLinAddr)iDisabledPte);
        InvalidateTLBForPage(iDisabledAddr, iDisabledAddrAsid);
        if (aliased)
            InvalidateTLBForPage(exc.iFaultAddress, ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid);
        iDisabledAddr = 0;
        iDisabledAddrAsid = -1;
        iDisabledPte = NULL;
        iDisabledOldVal = 0;
        r = KErrNone;
        }

leave:
    // Restore system lock state
    if (!fm)
        NKern::UnlockSystem();

    return r;
    }
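
// Illustrative decode (assumption): under the same 0x40f mask used in the
// demand paging path, FS code 0xf is a page permission fault on ARMv6, as
// opposed to the 0x7 translation fault handled there. Defrag only ever
// downgrades permissions on a present page, so only permission faults can be
// its work.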