// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\arm\xmmu.cia
//
//

#include <arm_mem.h>
#include "execs.h"
#include <nk_cpu.h>

#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
// This will also invalidate the TLB entry if the third argument (asid) is specified (>=0).
// If asid is < 0, the caller is expected to deal with TLB invalidation.
__NAKED__ void remove_and_invalidate_page(TPte*, TLinAddr, TInt)
	{
	asm("stmfd sp!, {r4-r6,lr} ");
	asm("mov r6, r2 ");			// r6 = asid
	asm("mov r4, r0 ");
	asm("mov r5, #1 ");			// by default, one cache line to clean

	asm("ldr r3, [r0] ");			// r3 = original PTE
	asm("cmp r2, #0 ");
	asm("bicpl r1, r1, #0xff ");
	asm("orrpl r1, r1, r2 ");		// if ASID supplied, combine with VA
	asm("mrs r12, cpsr ");
	asm("mov r2, #0 ");
	CPSIDAIF;				// interrupts off
	asm("str r2, [r0], #4 ");		// clear PTE
	asm("tst r3, #3 ");			// PTE present?
	asm("beq 0f ");				// if not, done
	asm("tst r3, #2 ");			// small page?
	asm("bne 1f ");				// skip if small

	asm("mov r5, #2 ");			// there will be 2 cache lines to clean
	asm("mov r3, #0 ");
	asm("str r2, [r0], #4 ");
	asm("stmia r0!, {r2,r3} ");		// clear 16 consecutive PTEs
	asm("stmia r0!, {r2,r3} ");
	asm("stmia r0!, {r2,r3} ");
	asm("stmia r0!, {r2,r3} ");
	asm("stmia r0!, {r2,r3} ");
	asm("stmia r0!, {r2,r3} ");
	asm("stmia r0!, {r2,r3} ");

	asm("1: ");
#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
	// Clean the changed page table entries from the cache.
	// On ARM1136, a cache line is always 32 bytes.
	// For a small page, a single cache line has to be cleaned.
	// For a large page, the 16 page table entries always fit into two cache lines.
	CLEAN_DCACHE_LINE(,r4);
	asm("subs r5, r5, #1");
	asm("addhi r4, r4, #32");		// Clean the next cache line as well. Executes ...
	CLEAN_DCACHE_LINE(hi,r4);		// ... only in the case of a large page.
#endif

	asm("mcr p15, 0, r1, c7, c10, 4 ");	// drain write buffer
	asm("cmp r6, #0");			// is asid valid?

	FLUSH_DTLB_ENTRY(pl,r1);		// remove stale TLB entry if asid >= 0
	FLUSH_ITLB_ENTRY(pl,r1);

	asm("0: ");
	asm("msr cpsr, r12 ");			// restore interrupts
	asm("ldmfd sp!, {r4-r6,pc} ");		// restore registers and return
	}

// This will also invalidate the TLB entry. (The third argument (asid) is assumed to be valid (>=0).)
__NAKED__ void remove_and_invalidate_section(TPde*, TLinAddr, TInt)
	{
	asm("ldr r3, [r0] ");			// r3 = original PDE
	asm("cmp r2, #0 ");
	asm("bicpl r1, r1, #0xff ");
	asm("orrpl r1, r1, r2 ");		// if ASID supplied, combine with VA
	asm("mrs r12, cpsr ");
	asm("mov r2, #0 ");
	CPSIDAIF;				// interrupts off
	asm("tst r3, #3 ");			// PDE present?
	asm("beq 0f ");				// if not, done
	asm("str r2, [r0] ");			// clear PDE
#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
	CLEAN_DCACHE_LINE(,r0);
#endif
	asm("mcr p15, 0, r1, c7, c10, 4 ");	// drain write buffer
	FLUSH_DTLB_ENTRY(,r1);			// remove stale TLB entry
	FLUSH_ITLB_ENTRY(,r1);
	asm("0: ");
	asm("msr cpsr, r12 ");			// restore interrupts
	__JUMP(,lr);
	}
#endif

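// Update a global (kernel) page table entry and remove any stale TLB entries for it.
// Uses the kernel ASID (0) for the TLB flush and falls through to MakePTEInaccessible.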
__NAKED__ void MakeGlobalPTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr)
	{
	asm("mov r3, #0 ");
	// fall through
	}

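// Write aNewPte to the page table entry at aPtePtr and remove any stale TLB entries for
// aLinAddr in address space aAsid, cleaning the page table cache line where required.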
__NAKED__ void MakePTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr, TInt aAsid)
	{
	asm("bic r2, r2, #0xff ");
	asm("orr r2, r2, r3 ");			// combine ASID with linear address for the TLB flush
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
	CPSIDIF;				// interrupts off
	asm("str r1,[r0]");			// write the new PTE
#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
	CLEAN_DCACHE_LINE(,r0);
#endif
	DRAIN_WRITE_BUFFER(,r1,r1);
	FLUSH_DTLB_ENTRY(,r2);			// remove stale TLB entries
	FLUSH_ITLB_ENTRY(,r2);
	asm("mov r1, #0");
	FLUSH_BTB(,r1);
	CPSIEIF;				// interrupts on
#else
	asm("str r1,[r0]");			// write the new PTE
#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
#ifdef __CPU_ARMV7
	DCCMVAU(r0);
	ARM_DSBSH;
#else
	CLEAN_DCACHE_LINE(,r0);
	DRAIN_WRITE_BUFFER(,r1,r1);
#endif
#endif
#ifdef __CPU_ARMV7
	UTLBIMVA(r2);
	ARM_DSBSH;
	ARM_ISBSY;
#else
	FLUSH_DTLB_ENTRY(,r2);			// remove stale TLB entries
	FLUSH_ITLB_ENTRY(,r2);
#endif
#endif
	__JUMP(,lr);
	}


__NAKED__ void InvalidateTLBForPage(TLinAddr /*aLinAddr*/, TInt /*aAsid*/)
//
// Flush a specified virtual address from the TLB.
// If aAsid>0, the flush is restricted to non-global entries with ASID==aAsid.
// If aAsid==0, the kernel ASID is specified - this will flush a global entry or an entry belonging to the local kernel space.
// If aAsid<0, no ASID is specified - this will flush all TLB entries with a matching VA, regardless of ASID and of whether
// they are local or global. As there is no MMU operation that does this directly, the entire TLB is flushed instead.
	{
	asm("cmp r1, #0 ");
	asm("bmi 1f ");
	asm("bic r0, r0, #0xff ");		// if aAsid >= 0, combine it with the linear address in r0
	asm("orr r0, r0, r1 ");
#ifdef __CPU_ARMV7
	UTLBIMVA(r0);
	ARM_DSBSH;
	ARM_ISBSY;
#else
	FLUSH_DTLB_ENTRY(,r0);
	FLUSH_ITLB_ENTRY(,r0);
#endif
	__JUMP(,lr);

	asm("1: ");
#ifdef __CPU_ARMV7
	UTLBIALL;
	ARM_DSBSH;
	ARM_ISBSY;
#else
	asm("mov r0, #0 ");
	FLUSH_IDTLB(,r0);			// aAsid < 0: there is no coprocessor instruction that flushes all
						// entries matching a linear address, so flush the entire TLB instead
#endif
	__JUMP(,lr);
	}

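// Flush the entire (instruction and data) TLB.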
__NAKED__ void FlushTLBs()
	{
#ifdef __CPU_ARMV7
	UTLBIALL;
	ARM_DSBSH;
	ARM_ISBSY;
#else
	asm("mov r0, #0 ");
	FLUSH_IDTLB(,r0);
#endif
	__JUMP(,lr);
	}

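// Return the Translation Table Base Control Register (TTBCR) setting.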
__NAKED__ TUint32 TTCR()
	{
	asm("mrc p15, 0, r0, c2, c0, 2 ");
	asm("and r0, r0, #7 ");			// only bottom 3 bits are defined
	__JUMP(,lr);
	}

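// Flush the branch prediction (branch target buffer) state.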
GLDEF_C __NAKED__ void __FlushBtb()
	{
#ifdef __CPU_ARMV7
#ifdef __SMP__
	BPIALLIS;
#else //def __SMP__
	BPIALL;
#endif // else __SMP__
	ARM_DSBSH;
	ARM_ISBSY;
#else //def __CPU_ARMV7
	asm("mov r1, #0");
	FLUSH_BTB(,r1);
#endif //else __CPU_ARMV7
	__JUMP(,lr);
	}

// Generic cache/TLB flush function.
// Which things are flushed is determined by aMask.
__NAKED__ void ArmMmu::GenericFlush(TUint32 /*aMask*/)
	{
#ifdef __CPU_ARMV7
	asm("tst r1, #%a0" : : "i" (EFlushDTLB) );
	asm("tsteq r1, #%a0" : : "i" (EFlushDPermChg) );
	asm("tsteq r1, #%a0" : : "i" (EFlushITLB) );
	asm("tsteq r1, #%a0" : : "i" (EFlushIPermChg) );
	asm("beq 1f ");
	UTLBIALL;
	ARM_DSBSH;
	ARM_ISBSY;
	asm("1: ");
#else
	asm("mov r2, #0 ");
	asm("tst r1, #%a0" : : "i" (EFlushDTLB) );
	asm("tsteq r1, #%a0" : : "i" (EFlushDPermChg) );
	FLUSH_DTLB(ne,r2);
	asm("tst r1, #%a0" : : "i" (EFlushITLB) );
	asm("tsteq r1, #%a0" : : "i" (EFlushIPermChg) );
	FLUSH_ITLB(ne,r2);
#endif
	__JUMP(,lr);
	}

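// Exec handler for unlocking the RAM drive. Returns without doing anything unless the calling
// process has the TCB capability; otherwise falls through to TInternalRamDrive::Unlock().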
__NAKED__ void ExecHandler::UnlockRamDrive()
	{
	asm("ldr r0, [r1, #%a0]" : : "i" (_FOFF(DThread,iOwningProcess)-_FOFF(DThread,iNThread)));
	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(DProcess,iS.iCaps));
	// __KERNEL_CAPABILITY_CHECK
	asm("tst r0, #%a0 " : : "i" ((TInt)(1<<ECapabilityTCB)));
	__JUMP(eq,lr);				// don't unlock the RAM drive if the caller lacks the TCB capability

	// fall through to unlock
	}

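// Make the RAM drive accessible by setting domain 1 (the RAM drive's domain) to client access
// in the Domain Access Control Register.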
EXPORT_C __NAKED__ void TInternalRamDrive::Unlock()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");
	asm("bic r0, r0, #0x0c ");
	asm("orr r0, r0, #0x04 ");		// RAM drive in domain 1
	asm("mcr p15, 0, r0, c3, c0, 0 ");
	CPWAIT(,r0);
	__JUMP(,lr);
	}

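// Make the RAM drive inaccessible again by setting domain 1 back to no access in the
// Domain Access Control Register.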
EXPORT_C __NAKED__ void TInternalRamDrive::Lock()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");
	asm("bic r0, r0, #0x0c ");		// RAM drive in domain 1
	asm("mcr p15, 0, r0, c3, c0, 0 ");
	CPWAIT(,r0);
	__JUMP(,lr);
	}

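// Open access to the aliased memory region by setting domain 2 to client access in the
// Domain Access Control Register.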
__NAKED__ void ArmMmu::UnlockAlias()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");
	asm("orr r0, r0, #0x10 ");		// Alias memory in domain 2
	asm("mcr p15, 0, r0, c3, c0, 0 ");
	CPWAIT(,r0);
	__JUMP(,lr);
	}

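// Close access to the aliased memory region by setting domain 2 back to no access in the
// Domain Access Control Register.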
__NAKED__ void ArmMmu::LockAlias()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");
	asm("bic r0, r0, #0x30 ");		// Alias memory in domain 2
	asm("mcr p15, 0, r0, c3, c0, 0 ");
	CPWAIT(,r0);
	__JUMP(,lr);
	}


__NAKED__ void M::LockUserMemory()
	{
	USER_MEMORY_GUARD_ON(,r0,r0);
	__JUMP(,lr);
	}


__NAKED__ void M::UnlockUserMemory()
	{
	USER_MEMORY_GUARD_OFF(,r0,r0);
	__JUMP(,lr);
	}