/*
 * SH4 emulation
 *
 * Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "hw/sh_intc.h"

#if defined(CONFIG_USER_ONLY)

void do_interrupt (CPUState *env)
{
    env->exception_index = -1;
}

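/* User-mode emulation has no MMU: any fault is reported to the guest as
   a TLB protection violation (0x0a0 for reads and instruction fetches,
   0x0c0 for writes) with the faulting address recorded in TEA. */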
int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
                             int mmu_idx, int is_softmmu)
{
    env->tea = address;
    env->exception_index = 0;
    switch (rw) {
    case 0:
        env->exception_index = 0x0a0;
        break;
    case 1:
        env->exception_index = 0x0c0;
        break;
    case 2:
        env->exception_index = 0x0a0;
        break;
    }
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
{
    return addr;
}

#else /* !CONFIG_USER_ONLY */

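/* Status codes returned by the address translation helpers below. */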
#define MMU_OK                   0
#define MMU_ITLB_MISS            (-1)
#define MMU_ITLB_MULTIPLE        (-2)
#define MMU_ITLB_VIOLATION       (-3)
#define MMU_DTLB_MISS_READ       (-4)
#define MMU_DTLB_MISS_WRITE      (-5)
#define MMU_DTLB_INITIAL_WRITE   (-6)
#define MMU_DTLB_VIOLATION_READ  (-7)
#define MMU_DTLB_VIOLATION_WRITE (-8)
#define MMU_DTLB_MULTIPLE        (-9)
#define MMU_DTLB_MISS            (-10)
#define MMU_IADDR_ERROR          (-11)
#define MMU_DADDR_ERROR_READ     (-12)
#define MMU_DADDR_ERROR_WRITE    (-13)

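/* Deliver the highest-priority pending exception or external interrupt:
   save SR, PC and R15 into SSR, SPC and SGR, enter privileged mode with
   BL and RB set, then branch to the handler (VBR + 0x100 for general
   exceptions, VBR + 0x400 for TLB misses, VBR + 0x600 for interrupts;
   resets vector to 0xa0000000). */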
void do_interrupt(CPUState * env)
{
    int do_irq = env->interrupt_request & CPU_INTERRUPT_HARD;
    int do_exp, irq_vector = env->exception_index;

    /* prioritize exceptions over interrupts */

    do_exp = env->exception_index != -1;
    do_irq = do_irq && (env->exception_index == -1);

    if (env->sr & SR_BL) {
        if (do_exp && env->exception_index != 0x1e0) {
            env->exception_index = 0x000; /* masked exception -> reset */
        }
        if (do_irq && !env->intr_at_halt) {
            return; /* masked */
        }
        env->intr_at_halt = 0;
    }

    if (do_irq) {
        irq_vector = sh_intc_get_pending_vector(env->intc_handle,
                                                (env->sr >> 4) & 0xf);
        if (irq_vector == -1) {
            return; /* masked */
        }
    }

    if (loglevel & CPU_LOG_INT) {
        const char *expname;
        switch (env->exception_index) {
        case 0x0e0:
            expname = "addr_error";
            break;
        case 0x040:
            expname = "tlb_miss";
            break;
        case 0x0a0:
            expname = "tlb_violation";
            break;
        case 0x180:
            expname = "illegal_instruction";
            break;
        case 0x1a0:
            expname = "slot_illegal_instruction";
            break;
        case 0x800:
            expname = "fpu_disable";
            break;
        case 0x820:
            expname = "slot_fpu";
            break;
        case 0x100:
            expname = "data_write";
            break;
        case 0x060:
            expname = "dtlb_miss_write";
            break;
        case 0x0c0:
            expname = "dtlb_violation_write";
            break;
        case 0x120:
            expname = "fpu_exception";
            break;
        case 0x080:
            expname = "initial_page_write";
            break;
        case 0x160:
            expname = "trapa";
            break;
        default:
            expname = do_irq ? "interrupt" : "???";
            break;
        }
        fprintf(logfile, "exception 0x%03x [%s] raised\n",
                irq_vector, expname);
        cpu_dump_state(env, logfile, fprintf, 0);
    }

    env->ssr = env->sr;
    env->spc = env->pc;
    env->sgr = env->gregs[15];
    env->sr |= SR_BL | SR_MD | SR_RB;

    if (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        /* Branch instruction should be executed again before delay slot. */
        env->spc -= 2;
        /* Clear flags for exception/interrupt routine. */
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL | DELAY_SLOT_TRUE);
    }
    if (env->flags & DELAY_SLOT_CLEARME)
        env->flags = 0;

    if (do_exp) {
        env->expevt = env->exception_index;
        switch (env->exception_index) {
        case 0x000:
        case 0x020:
        case 0x140:
            env->sr &= ~SR_FD;
            env->sr |= 0xf << 4; /* IMASK */
            env->pc = 0xa0000000;
            break;
        case 0x040:
        case 0x060:
            env->pc = env->vbr + 0x400;
            break;
        case 0x160:
            env->spc += 2; /* special case for TRAPA */
            /* fall through */
        default:
            env->pc = env->vbr + 0x100;
            break;
        }
        return;
    }

    if (do_irq) {
        env->intevt = irq_vector;
        env->pc = env->vbr + 0x600;
        return;
    }
}

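/* Record a hit on ITLB entry itlbnb in the LRUI bits kept in the upper
   byte of MMUCR, so that itlb_replacement() can later select the least
   recently used entry as the replacement victim. */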
static void update_itlb_use(CPUState * env, int itlbnb)
{
    uint8_t or_mask = 0, and_mask = (uint8_t) -1;

    switch (itlbnb) {
    case 0:
        and_mask = 0x1f;
        break;
    case 1:
        and_mask = 0xe7;
        or_mask = 0x80;
        break;
    case 2:
        and_mask = 0xfb;
        or_mask = 0x50;
        break;
    case 3:
        or_mask = 0x2c;
        break;
    }

    env->mmucr &= (and_mask << 24) | 0x00ffffff;
    env->mmucr |= (or_mask << 24);
}

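/* Decode the LRUI bits in MMUCR and return the index of the ITLB entry
   to be replaced. */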
static int itlb_replacement(CPUState * env)
{
    if ((env->mmucr & 0xe0000000) == 0xe0000000)
        return 0;
    if ((env->mmucr & 0x98000000) == 0x18000000)
        return 1;
    if ((env->mmucr & 0x54000000) == 0x04000000)
        return 2;
    if ((env->mmucr & 0x2c000000) == 0x00000000)
        return 3;
    assert(0);
}

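/* Note: tlb_t.size is precomputed from the SZ field when an entry is
   loaded (see cpu_load_tlb), so find_tlb_entry() can mask with it
   directly; the #if 0 size decoding below is dead code kept for
   reference. */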
/* Find the corresponding entry in the right TLB
   Return entry, MMU_DTLB_MISS or MMU_DTLB_MULTIPLE
*/
static int find_tlb_entry(CPUState * env, target_ulong address,
                          tlb_t * entries, uint8_t nbtlb, int use_asid)
{
    int match = MMU_DTLB_MISS;
    uint32_t start, end;
    uint8_t asid;
    int i;

    asid = env->pteh & 0xff;

    for (i = 0; i < nbtlb; i++) {
        if (!entries[i].v)
            continue;           /* Invalid entry */
        if (!entries[i].sh && use_asid && entries[i].asid != asid)
            continue;           /* Bad ASID */
#if 0
        switch (entries[i].sz) {
        case 0:
            size = 1024;        /* 1kB */
            break;
        case 1:
            size = 4 * 1024;    /* 4kB */
            break;
        case 2:
            size = 64 * 1024;   /* 64kB */
            break;
        case 3:
            size = 1024 * 1024; /* 1MB */
            break;
        default:
            assert(0);
        }
#endif
        start = (entries[i].vpn << 10) & ~(entries[i].size - 1);
        end = start + entries[i].size - 1;
        if (address >= start && address <= end) {   /* Match */
            if (match != MMU_DTLB_MISS)
                return MMU_DTLB_MULTIPLE;   /* Multiple match */
            match = i;
        }
    }
    return match;
}

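/* Return 1 if an entry identical to *needle is already present in
   haystack.  Callers use this to decide whether overwriting a TLB entry
   requires flushing the corresponding page from QEMU's software TLB. */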
static int same_tlb_entry_exists(const tlb_t * haystack, uint8_t nbtlb,
                                 const tlb_t * needle)
{
    int i;
    for (i = 0; i < nbtlb; i++)
        if (!memcmp(&haystack[i], needle, sizeof(tlb_t)))
            return 1;
    return 0;
}

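/* Advance the UTLB replacement counter URC (MMUCR bits 15:10), wrapping
   it when it reaches URB (MMUCR bits 23:18) or the end of the UTLB. */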
static void increment_urc(CPUState * env)
{
    uint8_t urb, urc;

    /* Increment URC */
    urb = ((env->mmucr) >> 18) & 0x3f;
    urc = ((env->mmucr) >> 10) & 0x3f;
    urc++;
    if (urc == urb || urc == UTLB_SIZE - 1)
        urc = 0;
    env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10);
}

/* Find itlb entry - update itlb from utlb if necessary and asked for
   Return entry, MMU_ITLB_MISS, MMU_ITLB_MULTIPLE or MMU_DTLB_MULTIPLE
   Update the itlb from utlb if update is not 0
*/
int find_itlb_entry(CPUState * env, target_ulong address,
                    int use_asid, int update)
{
    int e, n;

    e = find_tlb_entry(env, address, env->itlb, ITLB_SIZE, use_asid);
    if (e == MMU_DTLB_MULTIPLE)
        e = MMU_ITLB_MULTIPLE;
    else if (e == MMU_DTLB_MISS && update) {
        e = find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
        if (e >= 0) {
            tlb_t * ientry;
            n = itlb_replacement(env);
            ientry = &env->itlb[n];
            if (ientry->v) {
                if (!same_tlb_entry_exists(env->utlb, UTLB_SIZE, ientry))
                    tlb_flush_page(env, ientry->vpn << 10);
            }
            *ientry = env->utlb[e];
            e = n;
        } else if (e == MMU_DTLB_MISS)
            e = MMU_ITLB_MISS;
    } else if (e == MMU_DTLB_MISS)
        e = MMU_ITLB_MISS;
    if (e >= 0)
        update_itlb_use(env, e);
    return e;
}

/* Find utlb entry
   Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */
int find_utlb_entry(CPUState * env, target_ulong address, int use_asid)
{
    /* per utlb access */
    increment_urc(env);

    /* Return entry */
    return find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
}

/* Match address against MMU
   Return MMU_OK, MMU_DTLB_MISS_READ, MMU_DTLB_MISS_WRITE,
   MMU_DTLB_INITIAL_WRITE, MMU_DTLB_VIOLATION_READ,
   MMU_DTLB_VIOLATION_WRITE, MMU_ITLB_MISS,
   MMU_ITLB_MULTIPLE, MMU_ITLB_VIOLATION,
   MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.
*/
static int get_mmu_address(CPUState * env, target_ulong * physical,
                           int *prot, target_ulong address,
                           int rw, int access_type)
{
    int use_asid, n;
    tlb_t *matching = NULL;

    use_asid = (env->mmucr & MMUCR_SV) == 0 || (env->sr & SR_MD) == 0;

    if (rw == 2) {
        n = find_itlb_entry(env, address, use_asid, 1);
        if (n >= 0) {
            matching = &env->itlb[n];
            if (!(env->sr & SR_MD) && !(matching->pr & 2))
                n = MMU_ITLB_VIOLATION;
            else
                *prot = PAGE_READ;
        }
    } else {
        n = find_utlb_entry(env, address, use_asid);
        if (n >= 0) {
            matching = &env->utlb[n];
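            /* Permission check key: (PR << 1) | MD.  PR bit 1 set means
               the page is accessible from user mode, PR bit 0 set means
               it is writable; MD is 1 when running in privileged mode. */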
            switch ((matching->pr << 1) | ((env->sr & SR_MD) ? 1 : 0)) {
            case 0:             /* 000 */
            case 2:             /* 010 */
                n = (rw == 1) ? MMU_DTLB_VIOLATION_WRITE :
                    MMU_DTLB_VIOLATION_READ;
                break;
            case 1:             /* 001 */
            case 4:             /* 100 */
            case 5:             /* 101 */
                if (rw == 1)
                    n = MMU_DTLB_VIOLATION_WRITE;
                else
                    *prot = PAGE_READ;
                break;
            case 3:             /* 011 */
            case 6:             /* 110 */
            case 7:             /* 111 */
                *prot = (rw == 1) ? PAGE_WRITE : PAGE_READ;
                break;
            }
        } else if (n == MMU_DTLB_MISS) {
            n = (rw == 1) ? MMU_DTLB_MISS_WRITE :
                MMU_DTLB_MISS_READ;
        }
    }
    if (n >= 0) {
        *physical = ((matching->ppn << 10) & ~(matching->size - 1)) |
            (address & (matching->size - 1));
        if ((rw == 1) && !matching->d)
            n = MMU_DTLB_INITIAL_WRITE;
        else
            n = MMU_OK;
    }
    return n;
}

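/* Top-level address translation.  P1 (0x80000000-0x9fffffff) and P2
   (0xa0000000-0xbfffffff) map directly to physical memory with the top
   three address bits masked off, the P4 control area (0xe0000000 and up)
   is passed through unchanged, and all other addresses go through the
   UTLB/ITLB whenever address translation is enabled in MMUCR. */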
int get_physical_address(CPUState * env, target_ulong * physical,
                         int *prot, target_ulong address,
                         int rw, int access_type)
{
    /* P1, P2 and P4 areas do not use translation */
    if ((address >= 0x80000000 && address < 0xc0000000) ||
        address >= 0xe0000000) {
        if (!(env->sr & SR_MD)
            && (address < 0xe0000000 || address >= 0xe4000000)) {
            /* Unauthorized access in user mode (only store queues are available) */
            fprintf(stderr, "Unauthorized access\n");
            if (rw == 0)
                return MMU_DADDR_ERROR_READ;
            else if (rw == 1)
                return MMU_DADDR_ERROR_WRITE;
            else
                return MMU_IADDR_ERROR;
        }
        if (address >= 0x80000000 && address < 0xc0000000) {
            /* Mask upper 3 bits for P1 and P2 areas */
            *physical = address & 0x1fffffff;
        } else {
            *physical = address;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        return MMU_OK;
    }

    /* If MMU is disabled, return the corresponding physical page */
    if (!(env->mmucr & MMUCR_AT)) {
        *physical = address & 0x1FFFFFFF;
        *prot = PAGE_READ | PAGE_WRITE;
        return MMU_OK;
    }

    /* We need to resort to the MMU */
    return get_mmu_address(env, physical, prot, address, rw, access_type);
}

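/* Handle a softmmu fault: translate the access and either install the
   mapping into QEMU's TLB with tlb_set_page(), or convert the failure
   into the corresponding SH4 exception code and record the faulting
   address in TEA. */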
int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
                             int mmu_idx, int is_softmmu)
{
    target_ulong physical, page_offset, page_size;
    int prot, ret, access_type;

    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address, rw,
                               access_type);

    if (ret != MMU_OK) {
        env->tea = address;
        switch (ret) {
        case MMU_ITLB_MISS:
        case MMU_DTLB_MISS_READ:
            env->exception_index = 0x040;
            break;
        case MMU_DTLB_MULTIPLE:
        case MMU_ITLB_MULTIPLE:
            env->exception_index = 0x140;
            break;
        case MMU_ITLB_VIOLATION:
            env->exception_index = 0x0a0;
            break;
        case MMU_DTLB_MISS_WRITE:
            env->exception_index = 0x060;
            break;
        case MMU_DTLB_INITIAL_WRITE:
            env->exception_index = 0x080;
            break;
        case MMU_DTLB_VIOLATION_READ:
            env->exception_index = 0x0a0;
            break;
        case MMU_DTLB_VIOLATION_WRITE:
            env->exception_index = 0x0c0;
            break;
        case MMU_IADDR_ERROR:
        case MMU_DADDR_ERROR_READ:
            env->exception_index = 0x0e0;
            break;
        case MMU_DADDR_ERROR_WRITE:
            env->exception_index = 0x100;
            break;
        default:
            assert(0);
        }
        return 1;
    }

    page_size = TARGET_PAGE_SIZE;
    page_offset =
        (address - (address & TARGET_PAGE_MASK)) & ~(page_size - 1);
    address = (address & TARGET_PAGE_MASK) + page_offset;
    physical = (physical & TARGET_PAGE_MASK) + page_offset;

    return tlb_set_page(env, address, physical, prot, mmu_idx, is_softmmu);
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
{
    target_ulong physical;
    int prot;

    get_physical_address(env, &physical, &prot, addr, 0, 0);
    return physical;
}

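/* Load the PTEH/PTEL/PTEA register contents into the UTLB entry selected
   by MMUCR.URC (the behaviour of the LDTLB instruction), flushing the
   previously mapped page from QEMU's TLB when a valid entry is replaced
   and no identical entry remains in the ITLB. */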
void cpu_load_tlb(CPUState * env)
{
    int n = cpu_mmucr_urc(env->mmucr);
    tlb_t * entry = &env->utlb[n];

    if (entry->v) {
        /* Overwriting valid entry in utlb. */
        target_ulong address = entry->vpn << 10;
        if (!same_tlb_entry_exists(env->itlb, ITLB_SIZE, entry)) {
            tlb_flush_page(env, address);
        }
    }

    /* Take values into cpu status from registers. */
    entry->asid = (uint8_t)cpu_pteh_asid(env->pteh);
    entry->vpn = cpu_pteh_vpn(env->pteh);
    entry->v = (uint8_t)cpu_ptel_v(env->ptel);
    entry->ppn = cpu_ptel_ppn(env->ptel);
    entry->sz = (uint8_t)cpu_ptel_sz(env->ptel);
    switch (entry->sz) {
    case 0: /* 00 */
        entry->size = 1024; /* 1K */
        break;
    case 1: /* 01 */
        entry->size = 1024 * 4; /* 4K */
        break;
    case 2: /* 10 */
        entry->size = 1024 * 64; /* 64K */
        break;
    case 3: /* 11 */
        entry->size = 1024 * 1024; /* 1M */
        break;
    default:
        assert(0);
        break;
    }
    entry->sh = (uint8_t)cpu_ptel_sh(env->ptel);
    entry->c = (uint8_t)cpu_ptel_c(env->ptel);
    entry->pr = (uint8_t)cpu_ptel_pr(env->ptel);
    entry->d = (uint8_t)cpu_ptel_d(env->ptel);
    entry->wt = (uint8_t)cpu_ptel_wt(env->ptel);
    entry->sa = (uint8_t)cpu_ptea_sa(env->ptea);
    entry->tc = (uint8_t)cpu_ptea_tc(env->ptea);
}

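/* Write to the memory-mapped UTLB address array.  Bit 7 of the address
   selects an associative write: the VPN/ASID from mem_value is looked up
   in the UTLB and ITLB and only the V and D bits of a matching entry are
   updated, with a multiple-hit exception (0x140) raised if more than one
   UTLB entry matches.  Otherwise the UTLB entry indexed by address bits
   [13:8] is written directly. */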
void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, target_phys_addr_t addr,
                                    uint32_t mem_value)
{
    int associate = addr & 0x0000080;
    uint32_t vpn = (mem_value & 0xfffffc00) >> 10;
    uint8_t d = (uint8_t)((mem_value & 0x00000200) >> 9);
    uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
    uint8_t asid = (uint8_t)(mem_value & 0x000000ff);
    int use_asid = (s->mmucr & MMUCR_SV) == 0 || (s->sr & SR_MD) == 0;

    if (associate) {
        int i;
        tlb_t * utlb_match_entry = NULL;
        int needs_tlb_flush = 0;

        /* search UTLB */
        for (i = 0; i < UTLB_SIZE; i++) {
            tlb_t * entry = &s->utlb[i];
            if (!entry->v)
                continue;

            if (entry->vpn == vpn
                && (!use_asid || entry->asid == asid || entry->sh)) {
                if (utlb_match_entry) {
                    /* Multiple TLB Exception */
                    s->exception_index = 0x140;
                    s->tea = addr;
                    break;
                }
                if (entry->v && !v)
                    needs_tlb_flush = 1;
                entry->v = v;
                entry->d = d;
                utlb_match_entry = entry;
            }
            increment_urc(s); /* per utlb access */
        }

        /* search ITLB */
        for (i = 0; i < ITLB_SIZE; i++) {
            tlb_t * entry = &s->itlb[i];
            if (entry->vpn == vpn
                && (!use_asid || entry->asid == asid || entry->sh)) {
                if (entry->v && !v)
                    needs_tlb_flush = 1;
                if (utlb_match_entry)
                    *entry = *utlb_match_entry;
                else
                    entry->v = v;
                break;
            }
        }

        if (needs_tlb_flush)
            tlb_flush_page(s, vpn << 10);

    } else {
        int index = (addr & 0x00003f00) >> 8;
        tlb_t * entry = &s->utlb[index];
        if (entry->v) {
            /* Overwriting valid entry in utlb. */
            target_ulong address = entry->vpn << 10;
            if (!same_tlb_entry_exists(s->itlb, ITLB_SIZE, entry)) {
                tlb_flush_page(s, address);
            }
        }
        entry->asid = asid;
        entry->vpn = vpn;
        entry->d = d;
        entry->v = v;
        increment_urc(s);
    }
}

#endif