/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks are only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for the alpha target.
 * In the future, this is to be replaced by a multi-level table
 * to actually be able to handle the complete 64-bit address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
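
/* The page tables below are two-level: an address is first stripped of
   its page offset, then the resulting page index is split into an L1
   slot and an L2 slot.  For example, on a 32-bit target with 4 KiB
   pages (TARGET_PAGE_BITS == 12), L1_BITS works out to 10, so:

       index   = addr >> TARGET_PAGE_BITS;
       l1_slot = index >> L2_BITS;        // selects a PageDesc array
       l2_slot = index & (L2_SIZE - 1);   // selects the PageDesc

   page_l1_map() and page_find_alloc() below implement exactly this
   decomposition. */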
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always assume that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf(f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
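
/* Note that code_gen_buffer_max_size leaves one worst-case translation
   block of headroom (code_gen_max_block_size()) at the end of the
   buffer.  tb_alloc() compares against this reduced limit, so a new
   block is refused before cpu_gen_code() could run past the end of
   the mapping. */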
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    tlb_flush(env, 1);

    return 0;
}
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for (i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for (j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
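
/* The TB lists below store a small tag in the low two bits of each
   chained pointer: in a page's TB list the tag is the index (0 or 1)
   of the physical page within the TB, and in the jump lists it is the
   jump slot.  A tag value of 2 marks the head of the circular
   jmp_first list, which is why the loops below mask pointers with ~3
   and stop when ((long)tb & 3) == 2. */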
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
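
/* Example: set_bits(tab, 5, 7) marks bits 5..11.  The first byte gets
   mask 0xff << 5 (bits 5..7); since the range crosses a byte boundary
   the else branch runs, there are no whole bytes to fill with 0xff,
   and the final partial byte gets mask ~(0xff << (12 & 7)) = 0x0f,
   i.e. overall bits 8..11. */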
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
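
/* tb_gen_code() is the central entry point for translating a block of
   guest code.  If tb_alloc() fails, the whole code cache is flushed
   and the allocation retried; tb_invalidated_flag then tells the
   execution loop that any TB pointer it cached across this call may
   be stale. */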
/* invalidate all TBs which intersect with the target physical page
   starting in the range [start, end).  NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remains, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_io_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
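
/* The binary search above relies on tbs[] being filled in allocation
   order, so tc_ptr values increase monotonically.  When there is no
   exact match, m_max is left on the last TB whose tc_ptr is below
   tc_ptr, i.e. the block that contains the host code address. */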
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));
    if (!wp)
        return -ENOMEM;

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
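
/* A minimal usage sketch (addr is assumed to be 8-byte aligned here):

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 8, BP_GDB, &wp) < 0) {
           // len was not 1/2/4/8, or addr was not aligned to len
       }

   Passing BP_GDB keeps the watchpoint at the head of the list, so
   GDB-injected watchpoints are found before any others. */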
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));
    if (!bp)
        return -ENOMEM;

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for the A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}
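
/* In icount mode, forcing icount_decr.u16.high to 0xffff makes the
   instruction counter embedded in the translated code go negative at
   its next check, so the CPU drops out of generated code and notices
   the pending interrupt without any TB unlinking. */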
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
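
/* Example: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, while an unknown name anywhere
   in the list makes the whole call return 0. */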
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
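
/* Setting the addr_* fields to -1 is enough to invalidate an entry:
   once masked with TARGET_PAGE_MASK | TLB_INVALID_MASK, the result
   still has the invalid bit set and so can never compare equal to a
   page-aligned guest address (see tlb_flush_entry() below). */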
|

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    /* FIXME: This is wrong if start1 spans multiple regions. */
    start1 = (unsigned long)host_ram_addr(start);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    return 0;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
{
    if (kvm_enabled())
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = ram_offset_from_host((uint8_t *)(
            (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend));
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)host_ram_addr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region. This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint. */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
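
/* Illustrative call-site sketch (hypothetical values): the per-target
   MMU fault handlers (cpu_*_handle_mmu_fault) fill the soft TLB with
   this function after walking the guest page tables, e.g.:

       int prot = PAGE_READ | PAGE_WRITE;   // derived from the guest PTE
       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK,
                         prot, mmu_idx, 1);

   Later accesses to that virtual page then hit the fast path in the
   softmmu load/store helpers instead of faulting again. */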

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held. */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space. */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                       \
    do {                                                                  \
        if (addr > start_addr)                                            \
            start_addr2 = 0;                                              \
        else {                                                            \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;                 \
            if (start_addr2 > 0)                                          \
                need_subpage = 1;                                         \
        }                                                                 \
                                                                          \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)          \
            end_addr2 = TARGET_PAGE_SIZE - 1;                             \
        else {                                                            \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                         \
                need_subpage = 1;                                         \
        }                                                                 \
    } while (0)
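
/* Worked example for CHECK_SUBPAGE (assuming 4 KiB target pages): for a
   region registered at start_addr = 0x1200 with orig_size = 0x600, the
   page at addr = 0x1000 gets start_addr2 = 0x200, end_addr2 = 0x7ff and
   need_subpage = 1, i.e. only bytes 0x200..0x7ff of that page belong to
   the region, so the page must be split into a subpage. A page fully
   covered by the region yields start_addr2 = 0 and
   end_addr2 = TARGET_PAGE_SIZE - 1, leaving need_subpage untouched. */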

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page. The address used when calling the IO function is
   the offset from the start of the region, plus region_offset. Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset. This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           0);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
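
/* Usage sketch (illustrative, not part of this file): board init code
   typically allocates guest RAM and maps it through the plain wrapper
   cpu_register_physical_memory(), which passes region_offset = 0.
   "ram_size" here is a hypothetical variable:

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

   MMIO regions are mapped the same way, using the handle returned by
   cpu_register_io_memory() as phys_offset. */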

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* Get ram offset from physical ram. */
ram_addr_t get_ram_offset_phys(target_phys_addr_t addr)
{
    ram_addr_t pd;
    pd = cpu_get_physical_page_desc(addr);
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        fprintf(stderr, "Bad ram physical address " TARGET_FMT_plx "\n", addr);
        return 0;
    }
    return (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
}

#ifdef USE_KQEMU
/* FIXME: kqemu needs to be fixed and this removed. */
uint8_t *kqemu_phys_ram_base;
#endif

typedef struct ram_region {
    ram_addr_t offset;
    ram_addr_t size;
    uint8_t *host;
    struct ram_region *next;
} ram_region;

static ram_region *ram_regions;

uint8_t *host_ram_addr(ram_addr_t offset)
{
    ram_region *r;
    ram_region *prev;
    ram_region **prev_p;
#ifdef USE_KQEMU
    if (kqemu_allowed) {
        return kqemu_phys_ram_base + offset;
    }
#endif
    prev = NULL;
    prev_p = NULL;
    r = ram_regions;
    while (r && (r->offset > offset || r->offset + r->size <= offset)) {
        if (prev)
            prev_p = &prev->next;
        else
            prev_p = &ram_regions;
        prev = r;
        r = r->next;
    }
    if (!r) {
        fprintf(stderr, "Bogus ram offset 0x%lx\n", (unsigned long)offset);
        abort();
    }
    /* Move this region towards the start of the list. */
    if (prev) {
        *prev_p = r;
        prev->next = r->next;
        r->next = prev;
    }
    return r->host + offset - r->offset;
}
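
/* Design note (added commentary): the lookup above keeps ram_regions in
   rough MRU order by swapping the found region one step towards the
   list head, so repeated lookups of the same hot region stay cheap
   without a separate index structure. */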

ram_addr_t ram_offset_from_host(uint8_t *addr)
{
    ram_region *r;
#ifdef USE_KQEMU
    if (kqemu_allowed) {
        return addr - kqemu_phys_ram_base;
    }
#endif
    r = ram_regions;
    while (r && (r->host > addr || r->host + r->size <= addr)) {
        r = r->next;
    }
    if (!r) {
        fprintf(stderr, "Bogus host ram address 0x%lx\n", (unsigned long)addr);
        abort();
    }
    return r->offset + addr - r->host;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
#ifdef USE_KQEMU
    if (kqemu_allowed) {
        if ((phys_ram_alloc_offset + size) > phys_ram_size) {
            fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                    (uint64_t)size, (uint64_t)phys_ram_size);
            abort();
        }
        addr = phys_ram_alloc_offset;
        phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    } else
#endif
    {
        ram_region *r = qemu_mallocz(sizeof(*r));
        size = TARGET_PAGE_ALIGN(size);
        addr = phys_ram_alloc_offset;
        phys_ram_alloc_offset += size;
        r->offset = addr;
        r->size = size;
        r->host = qemu_vmalloc(size);
        r->next = ram_regions;
        ram_regions = r;
        phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                      phys_ram_alloc_offset >> TARGET_PAGE_BITS);
        memset(phys_ram_dirty + (addr >> TARGET_PAGE_BITS),
               0xff, size >> TARGET_PAGE_BITS);
    }
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: Implement this. */
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(host_ram_addr(ram_addr), val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(host_ram_addr(ram_addr), val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(host_ram_addr(ram_addr), val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
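
/* Note on the dirty byte (added commentary, flag values as defined in
   cpu-all.h): each RAM page has one byte in phys_ram_dirty holding
   per-client dirty flags (VGA, code, migration, ...); 0xff means "dirty
   for every client". The writers above set

       dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);

   i.e. everything except CODE_DIRTY_FLAG, which is only set again once
   tb_invalidate_phys_page_fast() has flushed the translated code on the
   page. Only then does the byte reach 0xff and the notdirty callback
   get removed via tlb_set_dirty(). */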

/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
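
/* Flow note (added commentary, sketching the mechanism above): on a hit
   the current TB is invalidated; with BP_STOP_BEFORE_ACCESS an
   EXCP_DEBUG is raised right away, otherwise the TB is regenerated with
   cflags = 1 (a single-instruction TB), execution restarts via
   cpu_resume_from_signal(), the check is re-entered with
   env->watchpoint_hit set, and the debug interrupt then fires after the
   current instruction completes. */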

/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                         region_offset);
    }

    return mmio;
}

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
#ifdef USE_KQEMU
    if (kqemu_allowed) {
        /* alloc dirty bits array */
        phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
        memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
    }
#endif
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
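
/* Usage sketch (hypothetical device, not part of this file): a device
   model registers its callbacks and then maps the returned handle:

       static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr)
       {
           return 0x42;                     // device register contents
       }
       static void mydev_writeb(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
       {
           // latch val into device state
       }
       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, NULL, NULL,         // word/dword omitted
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, NULL, NULL,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, NULL);
       cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);

   Because the word and dword handlers are NULL, the returned handle
   carries IO_MEM_SUBWIDTH, so such pages are routed through the subpage
   machinery above. */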

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = host_ram_addr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = host_ram_addr((pd & TARGET_PAGE_MASK)
                                    + (addr & ~TARGET_PAGE_MASK));
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = host_ram_addr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = host_ram_addr((pd & TARGET_PAGE_MASK) +
                            (addr & ~TARGET_PAGE_MASK));
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = host_ram_addr((pd & TARGET_PAGE_MASK) +
                            (addr & ~TARGET_PAGE_MASK));
        val = ldq_p(ptr);
    }
    return val;
}
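
/* Added note: for MMIO the 64-bit value is assembled from two 32-bit
   reads; on a big-endian target the read at 'addr' supplies bits 63..32
   and the one at 'addr + 4' bits 31..0 (and conversely on little
   endian), mirroring what ldq_p() does for the RAM case. */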

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = host_ram_addr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = host_ram_addr((pd & TARGET_PAGE_MASK) +
                            (addr & ~TARGET_PAGE_MASK));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = host_ram_addr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
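
/* Usage note (illustrative): this is the accessor the gdb stub uses to
   read and write guest virtual memory, e.g.

       uint8_t buf[8];
       if (cpu_memory_rw_debug(env, addr, buf, sizeof(buf), 0) < 0)
           return;                          // no mapping at addr

   with is_write = 0 for reads and 1 for writes. */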
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
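
/* Worked example (comment added for exposition, not authoritative):
   suppose tb->icount == 5 and the 3rd instruction performs the MMIO
   access.  The generated TB prologue pre-decremented icount_decr.u16.low
   by 5, so "n = icount_decr.u16.low + tb->icount" recovers the budget at
   TB entry; cpu_restore_state() rewinds icount_decr to the faulting
   instruction, so "n - icount_decr.u16.low" == 2 fully executed
   instructions, and after n++ we retranslate a 3-insn TB whose final,
   CF_LAST_IO-flagged instruction is the MMIO one, as determinism
   requires. */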
|
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ?
                (double) (code_gen_ptr - code_gen_buffer) / target_code_size
                : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
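
/* For reference (comment added for exposition): this routine backs the
   monitor's "info jit" command.  The expansion ratio is bytes of
   generated host code per byte of guest code translated, so a value of
   8.0 means each guest code byte cost about eight bytes of TCG output. */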
|
#if !defined(CONFIG_USER_ONLY)
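
/* Comment added for exposition (a sketch of the mechanism, not original
   text): softmmu_template.h acts as a poor man's template.  Each
   inclusion below is parameterized by SHIFT, the log2 of the access size
   in bytes, plus the macros above: SOFTMMU_CODE_ACCESS selects the
   code-fetch variants, GETPC() == NULL means TLB-fill faults carry no
   host return address to restore from, and MMUSUFFIX names the resulting
   _cmmu family (SHIFT 2 should expand to the 32-bit fetch helper
   __ldl_cmmu) used by the translators to read guest instructions through
   the soft TLB. */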
|
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif