author | Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com> |
Fri, 11 Jun 2010 15:02:23 +0300 | |
changeset 152 | 657f875b013e |
parent 109 | b3a1d9898418 |
permissions | -rw-r--r-- |
0 | 1 |
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies). |
2 |
// All rights reserved. |
|
3 |
// This component and the accompanying materials are made available |
|
4 |
// under the terms of the License "Eclipse Public License v1.0" |
|
5 |
// which accompanies this distribution, and is available |
|
6 |
// at the URL "http://www.eclipse.org/legal/epl-v10.html". |
|
7 |
// |
|
8 |
// Initial Contributors: |
|
9 |
// Nokia Corporation - initial contribution. |
|
10 |
// |
|
11 |
// Contributors: |
|
12 |
// |
|
13 |
// Description: |
|
14 |
// |
|
15 |
||
16 |
#include "arm_mem.h" |
|
17 |
#include "mm.h" |
|
18 |
#include "mmu.h" |
|
19 |
#include "mpager.h" |
|
20 |
||
21 |
#include "cache_maintenance.inl" |
|
22 |
#include "execs.h" |
|
23 |
||
24 |
||
25 |
#ifdef BROADCAST_TLB_MAINTENANCE |
|
26 |
// IPI object used to broadcast TLB invalidation requests to other CPUs.
// The argument supplied via AddArg() is interpreted by InvalidateIsr():
//   0         - invalidate the entire local TLB
//   <256      - treated as an ASID; invalidate all local entries for it
//   otherwise - treated as a page address; invalidate that entry
class TTLBIPI : public TGenericIPI
	{
public:
	TTLBIPI();
	static void InvalidateIsr(TGenericIPI*);
	static void WaitAndInvalidateIsr(TGenericIPI*);
	void AddArg(TLinAddr aArg);
public:
	volatile TInt iFlag;	// polled by WaitAndInvalidateIsr(); invalidation deferred until non-zero
	TLinAddr iArg;			// invalidation argument, encoding as described above
	};
|
37 |
||
38 |
// Construct an idle IPI object: wait flag clear, no invalidation argument.
TTLBIPI::TTLBIPI()
	{
	iFlag = 0;
	iArg = 0;
	}
|
42 |
||
43 |
// ISR executed on each CPU to invalidate its local TLB.
// The scope of the invalidation is selected by iArg: zero means the whole
// TLB, a value below 256 is taken to be an ASID, anything else is taken
// to be a page address.
void TTLBIPI::InvalidateIsr(TGenericIPI* aPtr)
	{
	TRACE2(("TLBInv"));
	TTLBIPI& a = *(TTLBIPI*)aPtr;
	TLinAddr arg = a.iArg;
	if (arg==0)
		LocalInvalidateTLB();
	else if (arg<256)
		LocalInvalidateTLBForAsid(arg);
	else
		LocalInvalidateTLBForPage(arg);
	}
|
55 |
||
56 |
// ISR which busy-waits until iFlag becomes non-zero (set by the code that
// queued this IPI — not visible in this file) before performing the same
// invalidation as InvalidateIsr().
void TTLBIPI::WaitAndInvalidateIsr(TGenericIPI* aPtr)
	{
	TRACE2(("TLBWtInv"));
	TTLBIPI& a = *(TTLBIPI*)aPtr;
	while (!a.iFlag)
		{ __chill(); }	// spin; __chill() relaxes the CPU whilst waiting
	InvalidateIsr(aPtr);
	}
|
64 |
||
65 |
// Perform the invalidation described by aArg on this CPU and broadcast it
// to all other CPUs, returning once every CPU has completed it.
// @param aArg 0 for whole-TLB, an ASID value (<256), or a page address.
void TTLBIPI::AddArg(TLinAddr aArg)
	{
	iArg = aArg;
	NKern::Lock();					// prevent preemption whilst invalidating locally
	InvalidateIsr(this);			// do our own TLB first...
	QueueAllOther(&InvalidateIsr);	// ...then queue the same ISR on every other CPU
	NKern::Unlock();
	WaitCompletion();				// block until all other CPUs have run the ISR
	}
|
74 |
||
75 |
/**
Invalidate TLB entries on every CPU in the system.

@param aLinAddrAndAsid 0 to invalidate everything, an ASID value (<256) to
					   invalidate that address space, or a page address to
					   invalidate a single entry.
*/
void BroadcastInvalidateTLB(TLinAddr aLinAddrAndAsid)
	{
	TTLBIPI tlbInfo;
	tlbInfo.AddArg(aLinAddrAndAsid);
	}
|
80 |
#endif // BROADCAST_TLB_MAINTENANCE |
|
81 |
||
82 |
// |
|
83 |
// Functions for class Mmu |
|
84 |
// |
|
85 |
||
86 |
/** |
|
87 |
Return the physical address of the memory mapped by a Page Table Entry (PTE). |
|
88 |
||
89 |
@param aPte The value contained in the PTE. |
|
90 |
@param aPteIndex The index of the PTE within its page table. |
|
91 |
*/ |
|
92 |
TPhysAddr Mmu::PtePhysAddr(TPte aPte, TUint aPteIndex) |
|
93 |
{ |
|
94 |
if(aPte&KArmV6PteSmallPage) |
|
95 |
return aPte & KPteSmallPageAddrMask; |
|
96 |
if(aPte&KArmV6PteLargePage) |
|
97 |
return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask); |
|
98 |
return KPhysAddrInvalid; |
|
99 |
} |
|
100 |
||
101 |
||
102 |
/** |
|
103 |
Return the virtual address of the page table referenced by the given |
|
104 |
Page Directory Entry (PDE) \a aPde. If the PDE doesn't refer to a |
|
105 |
page table then the null-pointer is returned. |
|
106 |
||
107 |
If the page table was not one allocated by the kernel then the |
|
108 |
results are unpredictable and may cause a system fault. |
|
109 |
||
110 |
@pre #MmuLock held. |
|
111 |
*/ |
|
112 |
TPte* Mmu::PageTableFromPde(TPde aPde) |
|
113 |
{ |
|
114 |
if((aPde&KPdePresentMask)==KArmV6PdePageTable) |
|
115 |
{ |
|
116 |
SPageInfo* pi = SPageInfo::FromPhysAddr(aPde); |
|
117 |
return (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(aPde&(KPageMask&~KPageTableMask))); |
|
118 |
} |
|
119 |
return 0; |
|
120 |
} |
|
121 |
||
122 |
||
123 |
/** |
|
124 |
Perform the action of #PageTableFromPde but without the possibility of |
|
125 |
a system fault caused the page table not being one allocated by the kernel. |
|
126 |
||
127 |
@pre #MmuLock held. |
|
128 |
*/ |
|
129 |
TPte* Mmu::SafePageTableFromPde(TPde aPde) |
|
130 |
{ |
|
131 |
if((aPde&KPdeTypeMask)==KArmV6PdePageTable) |
|
132 |
{ |
|
133 |
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde&~KPageMask); |
|
134 |
if(pi) |
|
135 |
return (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(aPde&(KPageMask&~KPageTableMask))); |
|
136 |
} |
|
137 |
return 0; |
|
138 |
} |
|
139 |
||
140 |
||
141 |
/** |
|
142 |
Return the base phsical address of the section table referenced by the given |
|
143 |
Page Directory Entry (PDE) \a aPde. If the PDE doesn't refer to a |
|
144 |
section then KPhysAddrInvalid is returned. |
|
145 |
||
146 |
@pre #MmuLock held. |
|
147 |
*/ |
|
148 |
TPhysAddr Mmu::SectionBaseFromPde(TPde aPde) |
|
149 |
{ |
|
150 |
if(PdeMapsSection(aPde)) |
|
151 |
return aPde&KPdeSectionAddrMask; |
|
152 |
return KPhysAddrInvalid; |
|
153 |
} |
|
154 |
||
155 |
||
156 |
/** |
|
157 |
Return a pointer to the Page Table Entry (PTE) which maps the |
|
158 |
virtual address \a aAddress in the address space \a aOsAsid. |
|
159 |
||
160 |
If no page table exists or it was not one allocated by the kernel |
|
161 |
then the results are unpredictable and may cause a system fault. |
|
162 |
||
163 |
@pre #MmuLock held. |
|
164 |
*/ |
|
165 |
TPte* Mmu::PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid) |
|
166 |
{ |
|
167 |
TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift]; |
|
168 |
SPageInfo* pi = SPageInfo::FromPhysAddr(pde); |
|
169 |
TPte* pt = (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(pde&(KPageMask&~KPageTableMask))); |
|
170 |
pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift); |
|
171 |
return pt; |
|
172 |
} |
|
173 |
||
174 |
||
175 |
/** |
|
176 |
Perform the action of #PtePtrFromLinAddr but without the possibility |
|
177 |
of a system fault. If the page table is not present or not one |
|
178 |
allocated by the kernel then the null-pointer is returned. |
|
179 |
||
180 |
@pre #MmuLock held. |
|
181 |
*/ |
|
182 |
TPte* Mmu::SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid) |
|
183 |
{ |
|
184 |
TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift]; |
|
185 |
TPte* pt = SafePageTableFromPde(pde); |
|
186 |
if(pt) |
|
187 |
pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift); |
|
188 |
return pt; |
|
189 |
} |
|
190 |
||
191 |
||
192 |
/**
Return the physical address for the page table whose virtual
address is \a aPt.

If the page table was not one allocated by the kernel then the
results are unpredictable and may cause a system fault.

@pre #MmuLock held.
*/
TPhysAddr Mmu::PageTablePhysAddr(TPte* aPt)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld() || PageTablesLockIsHeld());

	// walk the kernel's own address space to find the PTE which maps aPt...
	TInt pdeIndex = ((TLinAddr)aPt)>>KChunkShift;
	TPde pde = PageDirectory(KKernelOsAsid)[pdeIndex];
	__NK_ASSERT_DEBUG((pde&KPdePresentMask)==KArmV6PdePageTable);

	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TPte* pPte = (TPte*)(KPageTableBase+(pi->Index(true)<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
	TPte pte = pPte[(((TLinAddr)aPt)&KChunkMask)>>KPageShift];
	__NK_ASSERT_DEBUG(pte & KArmV6PteSmallPage);	// page tables are expected to be mapped with small pages

	// combine the mapped page's physical address with aPt's offset within the page...
	return (pte&KPteSmallPageAddrMask)|(((TLinAddr)aPt)&(KPageMask&~KPageTableMask));
	}
|
216 |
||
217 |
||
218 |
/**
Perform a page table walk to return the physical address of
the memory mapped at virtual address \a aLinAddr in the
address space \a aOsAsid.

If the page table used was not one allocated by the kernel
then the results are unpredictable and may cause a system fault.

Use of this function should be avoided, use instead Mmu::LinearToPhysical
which contains debug assertions for its preconditions.

@pre #MmuLock held.
*/
TPhysAddr Mmu::UncheckedLinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
	{
	TRACE2(("Mmu::UncheckedLinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
	// level 1: look up the PDE covering this address...
	TInt pdeIndex = aLinAddr>>KChunkShift;
	TPde pde = PageDirectory(aOsAsid)[pdeIndex];
	if ((pde&KPdePresentMask)==KArmV6PdePageTable)
		{
		// level 2: the PDE refers to a page table, look up the PTE within it...
		SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
		TPte* pPte = (TPte*)(KPageTableBase+(pi->Index(true)<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
		TPte pte = pPte[(aLinAddr&KChunkMask)>>KPageShift];
		if (pte & KArmV6PteSmallPage)
			{
			TPhysAddr pa=(pte&KPteSmallPageAddrMask)|(aLinAddr&~KPteSmallPageAddrMask);
			__KTRACE_OPT(KMMU,Kern::Printf("Mapped with small page - returning %08x",pa));
			return pa;
			}
		else if (pte & KArmV6PteLargePage)
			{
			TPhysAddr pa=(pte&KPteLargePageAddrMask)|(aLinAddr&~KPteLargePageAddrMask);
			__KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa));
			return pa;
			}
		}
	else if ((pde&KPdePresentMask)==KArmV6PdeSection)
		{
		// the PDE maps a section directly, no level-2 lookup required...
		TPhysAddr pa=(pde&KPdeSectionAddrMask)|(aLinAddr&~KPdeSectionAddrMask);
		__KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x",pa));
		return pa;
		}
	return KPhysAddrInvalid;	// not mapped
	}
|
262 |
||
263 |
||
264 |
extern TUint32 TTCR(); |
|
265 |
extern TUint32 CPUID(TInt /*aRegNum*/); |
|
266 |
||
267 |
||
268 |
/**
First-phase MMU initialisation.

Verifies that the hardware configuration reported by the CPU's ID and
cache-identification registers matches the configuration the kernel was
compiled for, works out the page colouring counts required by the L1
caches, sets up the per-CPU IPC alias addresses on SMP systems, and then
calls Init1Common().
*/
void Mmu::Init1()
	{
	TRACEB(("Mmu::Init1"));

	// check page local/global page directory split is correct...
	__NK_ASSERT_ALWAYS(TTCR()==1);

	// check cache type is supported and consistent with compile time macros...
	// NOTE: despite the member-style 'i' prefix, iColourCount is a local.
	TInt iColourCount = 0;
	TInt dColourCount = 0;
	TUint32 ctr = InternalCache::TypeRegister();
	TRACEB(("CacheTypeRegister = %08x",ctr));
#ifdef __CPU_ARMV6
	__NK_ASSERT_ALWAYS((ctr>>29)==0);	// check ARMv6 format
	if(ctr&0x800)
		iColourCount = 4;
	if(ctr&0x800000)
		dColourCount = 4;
#else
	__NK_ASSERT_ALWAYS((ctr>>29)==4);	// check ARMv7 format
	TUint l1ip = (ctr>>14)&3;	// L1 instruction cache indexing and tagging policy
	__NK_ASSERT_ALWAYS(l1ip>=2);	// check I cache is physically tagged

	TUint32 clidr = InternalCache::LevelIDRegister();
	TRACEB(("CacheLevelIDRegister = %08x",clidr));
	TUint l1type = clidr&7;		// cache type of level 1
	if(l1type)
		{
		if(l1type==2 || l1type==3 || l1type==4)
			{
			// we have an L1 data cache...
			TUint32 csir = InternalCache::SizeIdRegister(0,0);
			TUint sets = ((csir>>13)&0x7fff)+1;		// field is encoded minus one
			TUint ways = ((csir>>3)&0x3ff);
			ways+=1;								// field is encoded minus one
			TUint lineSizeShift = (csir&7)+4;
			// assume L1 data cache is VIPT and alias checks broken and so we need data cache colouring...
			dColourCount = (sets<<lineSizeShift)>>KPageShift;
			if(l1type==4) // unified cache, so set instruction cache colour as well...
				iColourCount = (sets<<lineSizeShift)>>KPageShift;
			TRACEB(("L1DCache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
			}

		if(l1type==1 || l1type==3)
			{
			// we have a separate L1 instruction cache...
			TUint32 csir = InternalCache::SizeIdRegister(1,0);
			TUint sets = ((csir>>13)&0x7fff)+1;		// field is encoded minus one
			TUint ways = ((csir>>3)&0x3ff);
			ways+=1;								// field is encoded minus one
			TUint lineSizeShift = (csir&7)+4;
			iColourCount = (sets<<lineSizeShift)>>KPageShift;
			TRACEB(("L1ICache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
			}
		}
	if(l1ip==3)
		{
		// PIPT cache, so no colouring restrictions...
		TRACEB(("L1ICache is PIPT"));
		iColourCount = 0;
		}
	else
		{
		// VIPT cache...
		TRACEB(("L1ICache is VIPT"));
		}
#endif
	TRACEB(("page colouring counts I=%d, D=%d",iColourCount,dColourCount));
	__NK_ASSERT_ALWAYS(iColourCount<=KPageColourCount);
	__NK_ASSERT_ALWAYS(dColourCount<=KPageColourCount);
#ifndef __CPU_I_CACHE_HAS_COLOUR
	__NK_ASSERT_ALWAYS(iColourCount==0);
#endif
#ifndef __CPU_D_CACHE_HAS_COLOUR
	__NK_ASSERT_ALWAYS(dColourCount==0);
#endif
#ifndef __CPU_CACHE_HAS_COLOUR
	__NK_ASSERT_ALWAYS(iColourCount==0);
	__NK_ASSERT_ALWAYS(dColourCount==0);
#endif

	// check MMU attributes match our assumptions...
	if(((CPUID(-1)>>16)&0xf)==0xf) // if have new CPUID format....
		{
		TUint mmfr1 = CPUID(5);
		TRACEB(("mmfr1 = %08x",mmfr1));
#ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
		__NK_ASSERT_ALWAYS(((mmfr1>>28)&0xf)==1); // Branch Predictor needs invalidating after ASID change
#else
		__NK_ASSERT_ALWAYS(((mmfr1>>28)&0xf)>=2); // Branch Predictor doesn't needs invalidating after ASID change
#endif

		TUint mmfr2 = CPUID(6);
		TRACEB(("mmfr2 = %08x",mmfr2));
		__NK_ASSERT_ALWAYS(((mmfr2>>20)&0xf)>=2); // check Mem Barrier instructions are supported in CP15

		TUint mmfr3 = CPUID(7);
		TRACEB(("mmfr3 = %08x",mmfr3));
		(void)mmfr3;	// may be unused depending on the macros below

#if defined(__SMP__) && !defined(__CPU_ARM11MP__)
		__NK_ASSERT_ALWAYS(((mmfr3>>12)&0xf)>=2); // check Maintenance Broadcast is for all cache and TLB operations
#endif
#ifdef __CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE
		__NK_ASSERT_ALWAYS(((mmfr3>>20)&0xf)>=1); // check Coherent Walk for page tables
#endif
		}

	Arm::DefaultDomainAccess = KDefaultDomainAccess;

#ifdef __SMP__
	// give each CPU its own IPC alias region, one chunk apart...
	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler& ss = TheSubSchedulers[i];
		TLinAddr a = KIPCAlias + (i<<KChunkShift);
		ss.i_AliasLinAddr = (TAny*)a;
		ss.i_AliasPdePtr = (TAny*)(KPageDirectoryBase + (a>>KChunkShift)*sizeof(TPde));
		}
#endif

	Init1Common();
	}
|
391 |
||
392 |
// Second-phase MMU initialisation; delegates to the architecture-independent
// common code.
void Mmu::Init2()
	{
	TRACEB(("Mmu::Init2"));

	Init2Common();
	}
|
398 |
||
399 |
DMemoryObject* ExceptionStacks; |
|
400 |
||
401 |
// Final part of second-phase initialisation: runs the common code and then
// creates the fixed kernel memory object used for the exception stacks.
void Mmu::Init2Final()
	{
	TRACEB(("Mmu::Init2Final"));

	Init2FinalCommon();

	// initialise memory object for exception stacks...
	TMappingCreateFlags mapFlags = (TMappingCreateFlags)(EMappingCreateFixedVirtual|EMappingCreateReserveAllResources);
	TMemoryAttributes memAttr = EMemoryAttributeStandard;
	TUint size = 4*2*KPageSize; // 4 exception stacks each of one guard page and one mapped page
	size |= 1; // lower bit of size is set if region to be claimed contains gaps
	TInt r = MM::InitFixedKernelMemory(ExceptionStacks, KExcptStacksLinearBase, KExcptStacksLinearEnd, size, EMemoryObjectUnpaged, EMemoryCreateNoWipe, memAttr, mapFlags);
	__NK_ASSERT_ALWAYS(r==KErrNone);	// the kernel cannot run without its exception stacks
	}
|
415 |
||
416 |
||
417 |
/** |
|
418 |
Return the page directory entry (PDE) value to use for when mapping page tables intended |
|
419 |
to map memory with the given attributes. |
|
420 |
The returned value has the physical address component being zero, so a page table's physical |
|
421 |
address can be simply ORed in. |
|
422 |
*/ |
|
423 |
TPde Mmu::BlankPde(TMemoryAttributes aAttributes) |
|
424 |
{ |
|
425 |
TPde pde = KArmV6PdePageTable; |
|
426 |
if(aAttributes&EMemoryAttributeUseECC) |
|
427 |
pde |= 1<<9; |
|
428 |
||
429 |
TRACE2(("Mmu::BlankPde(%x) returns 0x%x",aAttributes,pde)); |
|
430 |
return pde; |
|
431 |
} |
|
432 |
||
433 |
||
434 |
/** |
|
435 |
Return the page directory entry (PDE) value to use for when creating a section mapping for memory |
|
436 |
with the given attributes and #TPteType. |
|
437 |
The returned value has the physical address component being zero, so the section's physical address |
|
438 |
can be simply ORed in. |
|
439 |
*/ |
|
440 |
TPde Mmu::BlankSectionPde(TMemoryAttributes aAttributes, TUint aPteType) |
|
441 |
{ |
|
442 |
// reuse existing functions rather than duplicating the logic |
|
443 |
TPde pde = BlankPde(aAttributes); |
|
36
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
444 |
TPte pte = BlankPte(aAttributes, aPteType); |
0 | 445 |
return PageToSectionEntry(pte, pde); |
446 |
} |
|
447 |
||
448 |
||
449 |
/**
Return the page table entry (PTE) to use when mapping memory pages
with the given attributes and #TPteType.
This value has the physical address component being zero, so a page's physical
address can be simply ORed in.
*/

TPte Mmu::BlankPte(TMemoryAttributes aAttributes, TUint aPteType)
	{
	TUint attr = CanonicalMemoryAttributes(aAttributes);

	// common PTE setup...
	TPte pte = KArmV6PteSmallPage|KArmV6PteAP0;
	if(aPteType&EPteTypeUserAccess)
		pte |= KArmV6PteAP1;	// AP1 = user access
	if((aPteType&EPteTypeWritable)==false)	// i.e. the Writable bit is not set
		pte |= KArmV6PteAP2;	// AP2 = !writable
	if(attr&EMemoryAttributeShareable)
		pte |= KArmV6PteS;
	if((aPteType&EPteTypeGlobal)==false)	// i.e. the Global bit is not set
		pte |= KArmV6PteNG;
	if((aPteType&EPteTypeExecutable)==false)	// i.e. the Executable bit is not set
		pte |= KArmV6PteSmallXN;

#if defined(__CPU_MEMORY_TYPE_REMAPPING)

	// other PTE bits...
	if(pte&KArmV6PteSmallXN)
		pte |= KArmV6PteSmallTEX1;	// TEX1 is a copy of the XN

	// process memory type...
	TUint type = attr&EMemoryAttributeTypeMask;
	pte |= ((type&3)<<2) | ((type&4)<<4);	// scatter the 3-bit type into the C/B and TEX0 positions

#else

	// other PTE bits...
	if((pte&(KArmV6PteAP2|KArmV6PteAP1))==(KArmV6PteAP2|KArmV6PteAP1))
		pte &= ~KArmV6PteAP0;	// clear AP0 if user r/o

	// process memory type: derive the TEX/C/B encoding from the requested type...
	TUint texcb;
	switch((TMemoryType)(attr&EMemoryAttributeTypeMask))
		{
	case EMemAttStronglyOrdered:
		texcb = KArmV6MemAttSO;
		break;
	case EMemAttDevice:
		if(attr&EMemoryAttributeShareable)
			texcb = KArmV6MemAttSD;
		else
			texcb = KArmV6MemAttSD;	// should be KArmV6MemAttNSD? (but this made H4 go bang)
		break;
	case EMemAttNormalUncached:
		texcb = KArmV6MemAttNCNC;
		break;
	case EMemAttNormalCached:
		texcb = KArmV6MemAttWBWAWBWA;
		break;
	case EMemAttKernelInternal4:
	case EMemAttPlatformSpecific5:
	case EMemAttPlatformSpecific6:
	case EMemAttPlatformSpecific7:
		{
		// platform-defined types: ask the cache code how this type should be cached
		TUint32 cachingAttr = InternalCache::TypeToCachingAttributes((TMemoryType)(attr&EMemoryAttributeTypeMask));
		switch (cachingAttr)
			{
		case EMapAttrFullyBlocking:
			texcb = KArmV6MemAttSO;
			break;
		case EMapAttrBufferedNC:
			texcb = KArmV6MemAttSD;
			break;
		default:
			{
			//attr describes normal mapping
			//set texcb to b1BBAA where AA is internal and BB is external caching
			// TYPE		AA/BB
			// uncached	  0
			// WBWA		  1
			// WTRA		  2
			// WBRA		  3
			texcb = 0x10;
			switch (cachingAttr&EMapAttrL1CacheMask)
				{
			case EMapAttrL1Uncached: break;
#if defined(__CPU_ARM1136_ERRATUM_399234_FIXED)
			case EMapAttrCachedWTRA: texcb |= 2;break; // It is OK to use WT memory
#else
			case EMapAttrCachedWTRA:;break; // Erratum not fixed. Use uncached memory instead
#endif
			case EMapAttrCachedWBRA: texcb |= 3; break;
			default: texcb |= 1;//fully cached (WBWA)
				}
			switch (cachingAttr&EMapAttrL2CacheMask)
				{
			case EMapAttrL2Uncached: break;
			case EMapAttrL2CachedWTRA: texcb |= 8;break;
			case EMapAttrL2CachedWBRA: texcb |= 0xc; break;
			default: texcb |= 4;//fully cached (WBWA)
				}
			}
			}
		}
		break;
	default:
		__NK_ASSERT_ALWAYS(0);		// undefined memory type
		texcb = KArmV6MemAttSO;		// fall back to the most restrictive type (release builds)
		break;
		}
	// place the derived encoding into the PTE's TEX (bits 8:6) and C/B (bits 3:2) fields
	pte |= ((texcb&0x1c)<<4) | ((texcb&0x03)<<2);

#endif

	TRACE2(("Mmu::BlankPte(%x,%x) returns 0x%x",aAttributes,aPteType,pte));
	return pte;
	}
|
566 |
||
567 |
||
568 |
/** |
|
569 |
Calculate PDE and PTE which represent a page table mapping for an existing |
|
570 |
section mapping. |
|
571 |
||
572 |
@param[in] aPde The PDE for the existing section mapping. |
|
573 |
@param[out] aPde A PDE for a page table mapping, with physical address == 0. |
|
574 |
||
575 |
@return The PTE value for the first entry in the page table. |
|
576 |
*/ |
|
577 |
TPte Mmu::SectionToPageEntry(TPde& aPde) |
|
578 |
{ |
|
579 |
TPde pde = aPde; |
|
580 |
||
581 |
// calculate new PTE... |
|
582 |
TPte pte = pde&0xc; // copy CB bits |
|
583 |
if(pde&KArmV6PdeSectionXN) |
|
584 |
pte |= KArmV6PteSmallXN; // copy XN bit |
|
585 |
pte |= (pde&(0xff<<10))>>6; // copy NG, S, APX, TEX, AP bits |
|
586 |
pte |= KArmV6PteSmallPage; |
|
587 |
||
588 |
// calculate new PDE... |
|
589 |
pde &= 0x3e0; // keep IMP and DOMAIN |
|
590 |
pde |= KArmV6PdePageTable; |
|
591 |
||
592 |
aPde = pde; |
|
593 |
return pte; |
|
594 |
} |
|
595 |
||
596 |
||
597 |
/** |
|
598 |
Calculate a PDE entry which represents a section mapping for an existing |
|
599 |
page table mapping. |
|
600 |
||
601 |
@pre The existing page table contains mappings for a chunk sized and |
|
602 |
aligned contiguous region. |
|
603 |
||
604 |
@param aPte A PTE from the existing page table. |
|
605 |
@param aPde The existing PDE for the page table mappings. |
|
606 |
(Physical address portion is ignored.) |
|
607 |
||
608 |
@return A PDE entry value for a section mapping. |
|
609 |
*/ |
|
610 |
TPde Mmu::PageToSectionEntry(TPte aPte, TPde aPde) |
|
611 |
{ |
|
612 |
TPde pde = aPde&0x3e0; // keep IMP and DOMAIN |
|
613 |
pde |= aPte&(KPdeSectionAddrMask|0xc); // copy address and CB bits |
|
614 |
if(aPte&KArmV6PteSmallXN) |
|
615 |
pde |= KArmV6PdeSectionXN; // copy XN bit |
|
616 |
pde |= (aPte&(0xff<<4))<<6; // copy NG, S, APX, TEX, AP bits |
|
617 |
pde |= KArmV6PdeSection; |
|
618 |
return pde; |
|
619 |
} |
|
620 |
||
621 |
||
622 |
/**
Transform the specified memory attributes into the canonical form relevant to
the platform the code is running on. This applies defaults and overrides to
the attributes to return what should be used with the MMU.

@param aAttr The requested memory attributes.
@return The attributes to actually use, masked with #EMemoryAttributeMask.
*/
TMemoryAttributes Mmu::CanonicalMemoryAttributes(TMemoryAttributes aAttr)
	{
	TUint attr = aAttr;
	if(attr&EMemoryAttributeDefaultShareable)
		{
		// sharing not specified, use default...
#if defined (__CPU_USE_SHARED_MEMORY)
		attr |= EMemoryAttributeShareable;
#else
		attr &= ~EMemoryAttributeShareable;
#endif
		}

#if defined(FAULTY_NONSHARED_DEVICE_MEMORY)
	if((attr&(EMemoryAttributeShareable|EMemoryAttributeTypeMask))==EMemoryAttributeDevice)
		{
		// make unshared device memory into shared strongly ordered memory...
		attr ^= EMemoryAttributeShareable;						// set Shareable (known clear here)
		attr ^= EMemoryAttributeDevice^EMemoryAttributeStronglyOrdered;	// swap Device for StronglyOrdered
		}
#endif

#if defined(__SMP__) || defined(__CPU_FORCE_SHARED_MEMORY_IF_CACHED)
	TMemoryType type = (TMemoryType)(attr&KMemoryTypeMask);
	if(CacheMaintenance::IsCached(type))
		{
		// force cached memory to be shared memory on SMP systems...
		attr |= EMemoryAttributeShareable;
		}
#endif

	return (TMemoryAttributes)(attr&EMemoryAttributeMask);
	}
|
660 |
||
661 |
/** |
|
662 |
Method called to initialise RAM pages when they are allocated for a new use. |
|
663 |
This performs any cache synchronisation required to remove old entries |
|
664 |
and also wipes the contents of the memory (if requested via \a aFlags). |
|
665 |
||
666 |
@param aPageList Pointer to a list of physical addresses for the RAM pages, |
|
667 |
or, if the least significant bit of this value is set, then |
|
668 |
the rest of the value is the physical address of a contiguous |
|
669 |
region of RAM pages being allocated. |
|
670 |
||
671 |
@param aCount The number of pages. |
|
672 |
||
673 |
@param aFlags A set of flag values from #TRamAllocFlags which indicate |
|
674 |
the memory type the pages will be used for and whether |
|
675 |
the contents should be wiped. |
|
676 |
||
677 |
@param aReallocate True, if the RAM pages have already been previously allocated |
|
678 |
and are being reinitilised e.g. by DMemoryManager::ReAllocDecommitted. |
|
679 |
False, to indicate that these pages have been newly allocated (are in |
|
680 |
the SPageInfo::EUnused state.) |
|
681 |
||
682 |
@pre #RamAllocLock held. |
|
683 |
*/ |
|
684 |
void Mmu::PagesAllocated(TPhysAddr* aPageList, TUint aCount, TRamAllocFlags aFlags, TBool aReallocate)
	{
	TRACE2(("Mmu::PagesAllocated(0x%08x,%d,0x%x,%d)",aPageList, aCount, aFlags, (bool)aReallocate));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	TBool wipe = !(aFlags&EAllocNoWipe); // do we need to wipe page contents?
	TUint8 wipeByte = (aFlags&EAllocUseCustomWipeByte) ? (aFlags>>EAllocWipeByteShift)&0xff : 0x03; // value to wipe memory with (0x03 is the default wipe pattern)

	// process each page in turn...
	while(aCount--)
		{
		// get physical address of next page...
		TPhysAddr pagePhys;
		if((TPhysAddr)aPageList&1)
			{
			// aPageList is actually the physical address to use (contiguous region
			// encoding — see the @param documentation above); step it by one page...
			pagePhys = (TPhysAddr)aPageList&~1;
			*(TPhysAddr*)&aPageList += KPageSize;
			}
		else
			pagePhys = *aPageList++;
		__NK_ASSERT_DEBUG((pagePhys&KPageMask)==0); // physical addresses must be page aligned

		// get info about page...
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
		TMemoryType oldType = (TMemoryType)(pi->Flags(true)&KMemoryTypeMask);
		TBool oldTypeNormal = CacheMaintenance::IsNormal(oldType); // old mapping was cacheable 'normal' memory?

		TRACE2(("Mmu::PagesAllocated page=0x%08x, oldType=%d, wipe=%d, colour=%d",pagePhys,oldType,wipe,pi->Index(true)&KPageColourMask));
		if(wipe || oldTypeNormal)
			{
			// work out temporary mapping values...
			// (the temporary mapping must use the page's colour so VIPT caches see
			// the same aliasing as the page's real mappings)
			TUint colour = pi->Index(true)&KPageColourMask;
			TLinAddr tempLinAddr = iTempMap[0].iLinAddr+colour*KPageSize;
			TPte* tempPte = iTempMap[0].iPtePtr+colour;

			if(oldTypeNormal)
				{
				// cache maintenance required. Prepare temporary mapping.
				*tempPte = pagePhys | iTempPteCacheMaintenance;
				CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
				InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

				// will hold additional arguments in CacheMaintenance::PageToReuse call
				TInt pageToReuseMask = 0;

				// check if old and new mappings are the same. (Wiping needs temporary
				// mapping which may not be the same as the old and new mapping.)
				TMemoryType newType = (TMemoryType)(aFlags&KMemoryTypeMask); // memory type that pages will be used for
				if (!wipe && (newType ==oldType))
					pageToReuseMask |= CacheMaintenance::EOldAndNewMappingMatch;

				// iCacheInvalidate* members are protected by MmuLock...
				MmuLock::Lock();

				// decide whether to trigger maintenance of entire cache(s).
				if(CacheMaintenance::IsPageToReuseThresholdReached(iCacheInvalidatePageCount))
					{
					// enough pages to make it worth triggering maintenance of entire cache(s)
					pageToReuseMask |= CacheMaintenance::EThresholdReached;
					++iCacheInvalidateCounter;
					iCacheInvalidatePageCount = 0; // all pages will be partially synced
					}

				if(CacheMaintenance::IsCached(oldType) && !aReallocate)
					{
					if(pi->CacheInvalidateCounter()==(TUint32)iCacheInvalidateCounter)
						{
						// one less unused page in the L1 cache...
						__NK_ASSERT_DEBUG(iCacheInvalidatePageCount);
						--iCacheInvalidatePageCount;
						}
					else
						{
						// our page has been already partially maintained in cache
						// by a previous PageToReuse call.
						pageToReuseMask |= CacheMaintenance::EPageHasBeenPartiallySynced;
						}
					}

				MmuLock::Unlock();

				// perform the actual cache maintenance (outside MmuLock)...
				TBool pageRemovedFromCache = CacheMaintenance::PageToReuse(tempLinAddr, oldType, pagePhys, pageToReuseMask);
				if(pageRemovedFromCache && !aReallocate)
					pi->SetUncached();
				}

			if(wipe)
				{
				//We need uncached normal temporary mapping to wipe. Change it if necessary.
				//or , in case of !oldTypeNormal it is not configured yet.
				if (!oldTypeNormal || (CacheMaintenance::TemporaryMapping()!=EMemAttNormalUncached))
					{
					*tempPte = pagePhys | iTempPteUncached;
					CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
					InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
					}
				// wipe contents of memory...
				memset((TAny*)tempLinAddr, wipeByte, KPageSize);
				// ensure the wiped contents are visible before the mapping goes away...
				CacheMaintenance::PageToReuse(tempLinAddr, EMemAttNormalUncached, pagePhys);
				}

			// invalidate temporary mapping...
			*tempPte = KPteUnallocatedEntry;
			CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
			InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
			}

		// indicate page has been allocated...
		if(!aReallocate)
			pi->SetAllocated();

		// loop round for next page...
		} // end of while(aCount--)
	}
|
798 |
||
799 |
||
800 |
/** |
|
801 |
Method called to update the state of a RAM page when it is freed. |
|
802 |
This sets the page state to SPageInfo::EUnused. |
|
803 |
||
804 |
@param aPageInfo The page information structure for the RAM page. |
|
805 |
||
806 |
@pre #MmuLock held. |
|
807 |
*/ |
|
808 |
void Mmu::PageFreed(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	// nothing to do if the page is already marked unused...
	if(aPageInfo->Type()!=SPageInfo::EUnused)
		{
		aPageInfo->SetUnused();

		// if the page was mapped cacheable, record that its (stale) L1 cache
		// entries are still outstanding so PagesAllocated can account for them...
		TMemoryType memType = (TMemoryType)(aPageInfo->Flags()&KMemoryTypeMask);
		if(CacheMaintenance::IsCached(memType))
			{
			aPageInfo->SetCacheInvalidateCounter(iCacheInvalidateCounter);
			++iCacheInvalidatePageCount;
			}

		TRACE2(("Mmu::PageFreed page=0x%08x type=%d colour=%d",aPageInfo->PhysAddr(),aPageInfo->Flags()&KMemoryTypeMask,aPageInfo->Index()&KPageColourMask));
		}
	}
|
827 |
||
828 |
/** |
|
829 |
Remove the contents of RAM pages from any memory caches. |
|
830 |
||
831 |
@param aPages Pointer to a list of physical addresses for the RAM pages, |
|
832 |
or, if the least significant bit of this value is set, then |
|
833 |
the rest of the value is the physical address of a contiguous |
|
834 |
region of RAM pages. |
|
835 |
||
836 |
@param aCount The number of pages. |
|
837 |
||
838 |
@param aAttributes The memory attributes of the pages. |
|
839 |
||
840 |
@param aColour The colour for the first page; |
|
841 |
consecutive pages will be coloured accordingly. |
|
842 |
Only #KPageColourShift least significant bits are used, |
|
843 |
therefore an index into a memory object's memory can be |
|
844 |
used for this value. |
|
845 |
*/ |
|
846 |
void Mmu::CleanAndInvalidatePages(TPhysAddr* aPages, TUint aCount, TMemoryAttributes aAttributes, TUint aColour)
	{
	TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);

	if(!CacheMaintenance::IsNormal(type))
		{
		// non-'normal' memory types are not cached, so there is nothing to clean...
		TRACE2(("Mmu::CleanAndInvalidatePages - nothing to do"));
		return;
		}

	// RamAllocLock protects the temporary mapping slots (iTempMap)...
	RamAllocLock::Lock();

	while(aCount--)
		{
		TPhysAddr pagePhys = *aPages++;
		TRACE2(("Mmu::CleanAndInvalidatePages 0x%08x",pagePhys));

		// work out temporary mapping values...
		// (each consecutive page uses the next colour, matching how the memory
		// object's pages are coloured — see @param aColour above)
		aColour &= KPageColourMask;
		TLinAddr tempLinAddr = iTempMap[0].iLinAddr+aColour*KPageSize;
		TPte* tempPte = iTempMap[0].iPtePtr+aColour;
		++aColour;

		// temporarily map page...
		*tempPte = pagePhys | iTempPteCacheMaintenance;
		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

		// preserve memory content and remove from cache...
		CacheMaintenance::PageToPreserveAndReuse(tempLinAddr, type, pagePhys);

		// invalidate temporary mapping...
		*tempPte = KPteUnallocatedEntry;
		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

		// briefly release the lock between pages to bound hold time...
		RamAllocLock::Flash();
		}
	RamAllocLock::Unlock();
	}
|
886 |
||
887 |
||
888 |
extern void UnlockIPCAlias(); |
|
889 |
extern void LockIPCAlias(); |
|
890 |
||
891 |
||
892 |
TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TLinAddr& aAliasAddr, TUint& aAliasSize)
//
// Set up an alias mapping starting at address aAddr in specified process.
// Returns KErrNone on success (aAliasAddr/aAliasSize are set), KErrBadDescriptor
// if the address may not be aliased, or a paging error in debug builds.
// Note: Alias is removed if an exception is trapped by DThread::IpcExcHandler.
//
	{
	TRACE2(("Thread %O Alias %08x+%x Process %O",this,aAddr,aSize,aProcess));
	__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false
	// If there is an existing alias it should be on the same process otherwise
	// the os asid reference may be leaked.
	__NK_ASSERT_DEBUG(!iAliasLinAddr || aProcess == iAliasProcess);

	if(TUint(aAddr^KIPCAlias)<TUint(KIPCAliasAreaSize))
		return KErrBadDescriptor; // prevent access to alias region

#ifdef _DEBUG
	// debug aid: force the region to be paged out so every IPC access takes
	// a fresh paging fault...
	if (KDebugNum(KFORCEKUPAGEFAULTS))
		{
		TInt r = ThePager.FlushRegion(aProcess, aAddr, aSize);
		if (r != KErrNone)
			return r;
		}
#endif

	// Grab the mmu lock before opening a reference on os asid so that this thread
	// is in an implicit critical section and therefore can't leak the reference by
	// dying before iAliasLinAddr is set.
	MmuLock::Lock();

	TInt osAsid;
	if (!iAliasLinAddr)
		{// There isn't any existing alias.
		// Open a reference on the aProcess's os asid so that it is not freed and/or reused
		// while we are aliasing an address belonging to it.
		osAsid = aProcess->TryOpenOsAsid();
		if (osAsid < 0)
			{// Couldn't open os asid so aProcess is no longer running.
			MmuLock::Unlock();
			return KErrBadDescriptor;
			}
		}
	else
		{
		// Just read the os asid of the process being aliased we already have a reference on it.
		osAsid = aProcess->OsAsid();
		}

	// Now we have the os asid check access to kernel memory.
	if(aAddr >= KUserMemoryLimit && osAsid != (TUint)KKernelOsAsid)
		{
		if (!iAliasLinAddr)
			{// Close the new reference as RemoveAlias won't do as iAliasLinAddr is not set.
			aProcess->AsyncCloseOsAsid();	// Asynchronous close as this method should be quick.
			}
		MmuLock::Unlock();
		return KErrBadDescriptor; // prevent access to supervisor only memory
		}

	// Now we know all accesses to global memory are safe so check if aAddr is global.
	if(aAddr >= KGlobalMemoryBase)
		{
		// address is in global section, don't bother aliasing it...
		if (!iAliasLinAddr)
			{// Close the new reference as not required.
			aProcess->AsyncCloseOsAsid();	// Asynchronous close as this method should be quick.
			MmuLock::Unlock();
			}
		else
			{// Remove the existing alias as it is not required.
			DoRemoveAlias(iAliasLinAddr);	// Releases mmulock.
			}
		// global memory is mapped in every address space, so the caller can
		// just use the address directly, up to the end of the current chunk...
		aAliasAddr = aAddr;
		TInt maxSize = KChunkSize-(aAddr&KChunkMask);
		aAliasSize = aSize<maxSize ? aSize : maxSize;
		TRACE2(("DMemModelThread::Alias() abandoned as memory is globally mapped"));
		return KErrNone;
		}

	// look up the page directory entry covering aAddr in the target process...
	TPde* pd = Mmu::PageDirectory(osAsid);
	TInt pdeIndex = aAddr>>KChunkShift;
	TPde pde = pd[pdeIndex];
	pde = PDE_IN_DOMAIN(pde, KIPCAliasDomain);	// change domain for PDE
	// Get os asid, this is the current thread's process so no need for reference.
	TUint32 local_asid = ((DMemModelProcess*)iOwningProcess)->OsAsid();
#ifdef __SMP__
	TLinAddr aliasAddr;	// set below — on SMP the alias area is per-CPU
#else
	TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
#endif
	if(pde==iAliasPde && iAliasLinAddr)
		{
		// pde already aliased, so just update linear address...
#ifdef __SMP__
		__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
		aliasAddr = iAliasLinAddr & ~KChunkMask;
		aliasAddr |= (aAddr & (KChunkMask & ~KPageMask));
#endif
		iAliasLinAddr = aliasAddr;
		}
	else
		{
		// alias PDE changed...
		if(!iAliasLinAddr)
			{
			UnlockIPCAlias();
			TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
#ifdef __SMP__
			__NK_ASSERT_DEBUG(iCpuRestoreCookie==-1);
			iCpuRestoreCookie = NKern::FreezeCpu();	// temporarily lock current thread to this processor
#endif
			}
		iAliasPde = pde;
		iAliasProcess = aProcess;
#ifdef __SMP__
		TSubScheduler& ss = SubScheduler();	// OK since we are locked to this CPU
		aliasAddr = TLinAddr(ss.i_AliasLinAddr) + (aAddr & (KChunkMask & ~KPageMask));
		iAliasPdePtr = (TPde*)(TLinAddr(ss.i_AliasPdePtr) + (local_asid << KPageDirectoryShift));
#endif
		iAliasLinAddr = aliasAddr;
		*iAliasPdePtr = pde;
		SinglePdeUpdated(iAliasPdePtr);
		}

	TRACE2(("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr));
	// stale translations for the alias address must be removed in this
	// process's address space only...
	LocalInvalidateTLBForPage(aliasAddr | local_asid);
	TInt offset = aAddr&KPageMask;
	aAliasAddr = aliasAddr | offset;
	TInt maxSize = KPageSize - offset;
	aAliasSize = aSize<maxSize ? aSize : maxSize; // aliasing is limited to one page at a time
	iAliasTarget = aAddr & ~KPageMask;

	MmuLock::Unlock();

	return KErrNone;
	}
|
1027 |
||
1028 |
||
1029 |
void DMemModelThread::RemoveAlias() |
|
1030 |
// |
|
1031 |
// Remove alias mapping (if present) |
|
1032 |
// |
|
1033 |
{ |
|
1034 |
TRACE2(("Thread %O RemoveAlias", this)); |
|
1035 |
__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false |
|
1036 |
||
1037 |
TLinAddr addr = iAliasLinAddr; |
|
1038 |
if(addr) |
|
1039 |
{ |
|
1040 |
MmuLock::Lock(); |
|
1041 |
||
36
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1042 |
DoRemoveAlias(addr); // Unlocks mmulock. |
0 | 1043 |
} |
1044 |
} |
|
1045 |
||
1046 |
||
1047 |
/** |
|
1048 |
Remove the alias mapping. |
|
1049 |
||
1050 |
@pre Mmulock held |
|
36
538db54a451d
Revision: 201003
Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
parents:
31
diff
changeset
|
1051 |
@post MmuLock released. |
0 | 1052 |
*/ |
1053 |
void DMemModelThread::DoRemoveAlias(TLinAddr aAddr)
	{
	LockIPCAlias();
	// clear the thread's alias state and unmap the alias PDE...
	iAliasLinAddr = 0;
	iAliasPde = KPdeUnallocatedEntry;
	*iAliasPdePtr = KPdeUnallocatedEntry;
	SinglePdeUpdated(iAliasPdePtr);
	__NK_ASSERT_DEBUG((aAddr&KPageMask)==0); // alias addresses are page aligned
	// Invalidate the tlb using os asid, no need to open a reference as this
	// is the current thread's process os asid.
	LocalInvalidateTLBForPage(aAddr | ((DMemModelProcess*)iOwningProcess)->OsAsid());
	iAliasLink.Deque(); // remove from TheMmu.iAliasList
#ifdef __SMP__
	// the thread was frozen to this CPU while the alias existed (see Alias());
	// release it now the per-CPU alias mapping is gone...
	__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
	NKern::EndFreezeCpu(iCpuRestoreCookie);
	iCpuRestoreCookie = -1;
#endif

	// Must close the os asid while holding MmuLock so we are in an implicit critical section.
	iAliasProcess->AsyncCloseOsAsid();	// Asynchronous close as this method should be quick.
	MmuLock::Unlock();
	}
1075 |
||
1076 |
||
1077 |
TInt M::DemandPagingFault(TAny* aExceptionInfo)
//
// Decode an ARM exception and, if it looks like a demand paging fault,
// pass it on to the mmu. Returns KErrUnknown for faults that cannot be
// caused by paging; otherwise returns the result of Mmu::HandlePageFault.
//
	{
	TArmExcInfo& exc = *(TArmExcInfo*)aExceptionInfo;

	// only user memory may be demand paged, so user access is always required...
	TUint perms = EUser;

	switch(exc.iExcCode)
		{
	case EArmExceptionPrefetchAbort:
		// instruction fetch faulted, so execute permission is also needed...
		perms |= EExecute;
		break;
	case EArmExceptionDataAbort:
		break;
	default:
		return KErrUnknown; // neither prefetch abort nor data abort
		}

	// only translation, permission or instruction cache maintenance faults
	// can be satisfied by paging memory in...
	if((exc.iFaultStatus&0x405) != 5 && (exc.iFaultStatus&0x40f) != 4)
		return KErrUnknown;

	// fault status bit 11 set means the access was a write...
	if(exc.iFaultStatus&(1<<11))
		perms |= EReadWrite;

	// let TheMmu handle the fault...
	return TheMmu.HandlePageFault(exc.iR15, exc.iFaultAddress, perms, aExceptionInfo);
	}
|
1105 |
||
1106 |