symbian-qemu-0.9.1-12/qemu-symbian-svp/exec-all.h
changeset 1 2fb8b9db1c86
equal deleted inserted replaced
0:ffa851df0825 1:2fb8b9db1c86
       
     1 /*
       
     2  * internal execution defines for qemu
       
     3  *
       
     4  *  Copyright (c) 2003 Fabrice Bellard
       
     5  *
       
     6  * This library is free software; you can redistribute it and/or
       
     7  * modify it under the terms of the GNU Lesser General Public
       
     8  * License as published by the Free Software Foundation; either
       
     9  * version 2 of the License, or (at your option) any later version.
       
    10  *
       
    11  * This library is distributed in the hope that it will be useful,
       
    12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       
    14  * Lesser General Public License for more details.
       
    15  *
       
    16  * You should have received a copy of the GNU Lesser General Public
       
    17  * License along with this library; if not, write to the Free Software
       
    18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
       
    19  */
       
    20 
       
    21 #ifndef _EXEC_ALL_H_
       
    22 #define _EXEC_ALL_H_
       
    23 /* allow to see translation results - the slowdown should be negligible, so we leave it */
       
    24 #define DEBUG_DISAS
       
    25 
       
    26 /* is_jmp field values */
       
    27 #define DISAS_NEXT    0 /* next instruction can be analyzed */
       
    28 #define DISAS_JUMP    1 /* only pc was modified dynamically */
       
    29 #define DISAS_UPDATE  2 /* cpu state was modified dynamically */
       
    30 #define DISAS_TB_JUMP 3 /* only pc was modified statically */
       
    31 
       
    32 typedef struct TranslationBlock TranslationBlock;
       
    33 
       
    34 /* XXX: make safe guess about sizes */
       
    35 /* Most instructions only generate a few ops.  However load multiple
       
     36    instructions (in particular the NEON array load instructions) expand to a
       
    37    very large number of ops.  */
       
    38 #define MAX_OP_PER_INSTR 200
       
    39 /* A Call op needs up to 6 + 2N parameters (N = number of arguments).  */
       
    40 #define MAX_OPC_PARAM 10
       
    41 #define OPC_BUF_SIZE 512
       
    42 #define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
       
    43 
       
    44 /* Maximum size a TCG op can expand to.  This is complicated because a
       
     45    single op may require several host instructions and register reloads.
       
    46    For now take a wild guess at 128 bytes, which should allow at least
       
    47    a couple of fixup instructions per argument.  */
       
    48 #define TCG_MAX_OP_SIZE 128
       
    49 
       
    50 #define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
       
    51 
       
    52 extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
       
    53 extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
       
    54 extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
       
    55 extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
       
    56 extern uint16_t gen_opc_icount[OPC_BUF_SIZE];
       
    57 extern target_ulong gen_opc_jump_pc[2];
       
    58 extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];
       
    59 
       
    60 typedef void (GenOpFunc)(void);
       
    61 typedef void (GenOpFunc1)(long);
       
    62 typedef void (GenOpFunc2)(long, long);
       
    63 typedef void (GenOpFunc3)(long, long, long);
       
    64 
       
    65 #include "qemu-log.h"
       
    66 
       
    67 void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
       
    68 void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
       
    69 void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
       
    70                  unsigned long searched_pc, int pc_pos, void *puc);
       
    71 
       
    72 unsigned long code_gen_max_block_size(void);
       
    73 void cpu_gen_init(void);
       
    74 int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
       
    75                  int *gen_code_size_ptr);
       
    76 int cpu_restore_state(struct TranslationBlock *tb,
       
    77                       CPUState *env, unsigned long searched_pc,
       
    78                       void *puc);
       
    79 int cpu_restore_state_copy(struct TranslationBlock *tb,
       
    80                            CPUState *env, unsigned long searched_pc,
       
    81                            void *puc);
       
    82 void cpu_resume_from_signal(CPUState *env1, void *puc);
       
    83 void cpu_io_recompile(CPUState *env, void *retaddr);
       
    84 TranslationBlock *tb_gen_code(CPUState *env, 
       
    85                               target_ulong pc, target_ulong cs_base, int flags,
       
    86                               int cflags);
       
    87 void cpu_exec_init(CPUState *env);
       
    88 void cpu_loop_exit(void);
       
    89 int page_unprotect(target_ulong address, unsigned long pc, void *puc);
       
    90 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
       
    91                                    int is_cpu_write_access);
       
    92 void tb_invalidate_page_range(target_ulong start, target_ulong end);
       
    93 void tlb_flush_page(CPUState *env, target_ulong addr);
       
    94 void tlb_flush(CPUState *env, int flush_global);
       
    95 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
       
    96                       target_phys_addr_t paddr, int prot,
       
    97                       int mmu_idx, int is_softmmu);
       
    98 static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
       
    99                                target_phys_addr_t paddr, int prot,
       
   100                                int mmu_idx, int is_softmmu)
       
   101 {
       
   102     if (prot & PAGE_READ)
       
   103         prot |= PAGE_EXEC;
       
   104     return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
       
   105 }
       
   106 
       
    107 #define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */
       
   108 
       
   109 #define CODE_GEN_PHYS_HASH_BITS     15
       
   110 #define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)
       
   111 
       
   112 #define MIN_CODE_GEN_BUFFER_SIZE     (1024 * 1024)
       
   113 
       
   114 /* estimated block size for TB allocation */
       
   115 /* XXX: use a per code average code fragment size and modulate it
       
   116    according to the host CPU */
       
   117 #if defined(CONFIG_SOFTMMU)
       
   118 #define CODE_GEN_AVG_BLOCK_SIZE 128
       
   119 #else
       
   120 #define CODE_GEN_AVG_BLOCK_SIZE 64
       
   121 #endif
       
   122 
       
   123 #if defined(__powerpc__) || defined(__x86_64__) || defined(__arm__)
       
   124 #define USE_DIRECT_JUMP
       
   125 #endif
       
   126 #if defined(__i386__) && !defined(_WIN32)
       
   127 #define USE_DIRECT_JUMP
       
   128 #endif
       
   129 
       
/* A translation block: one contiguous run of guest instructions compiled
   into host code, together with the bookkeeping needed to look it up,
   chain it to other blocks, and invalidate it when guest code changes. */
struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    /* two possible patch sites per logical jump: slots n and n + 2
       (see tb_set_jmp_target); 0xffff marks an unused second site */
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    unsigned long tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    /* NOTE(review): presumably the number of guest instructions in this
       block, used by icount mode — confirm against tb_gen_code */
    uint32_t icount;
};
       
   164 
       
   165 static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
       
   166 {
       
   167     target_ulong tmp;
       
   168     tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
       
   169     return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
       
   170 }
       
   171 
       
   172 static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
       
   173 {
       
   174     target_ulong tmp;
       
   175     tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
       
   176     return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
       
   177 	    | (tmp & TB_JMP_ADDR_MASK));
       
   178 }
       
   179 
       
   180 static inline unsigned int tb_phys_hash_func(unsigned long pc)
       
   181 {
       
   182     return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
       
   183 }
       
   184 
       
   185 TranslationBlock *tb_alloc(target_ulong pc);
       
   186 void tb_free(TranslationBlock *tb);
       
   187 void tb_flush(CPUState *env);
       
   188 void tb_link_phys(TranslationBlock *tb,
       
   189                   target_ulong phys_pc, target_ulong phys_page2);
       
   190 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr);
       
   191 
       
   192 extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
       
   193 extern uint8_t *code_gen_ptr;
       
   194 extern int code_gen_max_blocks;
       
   195 
       
   196 #if defined(USE_DIRECT_JUMP)
       
   197 
       
   198 #if defined(__powerpc__)
       
   199 extern void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
       
   200 #define tb_set_jmp_target1 ppc_tb_set_jmp_target
       
   201 #elif defined(__i386__) || defined(__x86_64__)
       
/* Patch an x86 direct-jump site in generated code.  jmp_addr points at
   the 4-byte rel32 displacement field of the jump instruction. */
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    /* rel32 is relative to the end of the displacement field, hence the
       +4 bias */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
       
   208 #elif defined(__arm__)
       
/* Patch an ARM direct-jump site in generated code, then flush the
   icache for the patched word so the core fetches the new branch. */
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
#if QEMU_GNUC_PREREQ(4, 1)
    /* GCC >= 4.1 provides __clear_cache as a builtin icache flush */
    void __clear_cache(char *beg, char *end);
#else
    /* fixed registers a1-a3 for the ARM cacheflush swi below */
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    /* Rewrite the 24-bit branch offset of the B instruction at jmp_addr.
       The offset is word-scaled (>> 2) and biased by 8 for the ARM
       pipeline (PC reads as instruction address + 8).
       NOTE(review): this ORs the new offset in rather than clearing the
       old offset bits first — presumably the field is zero when first
       patched; confirm the translator always emits it that way. */
    *(uint32_t *)jmp_addr |= ((addr - (jmp_addr + 8)) >> 2) & 0xffffff;

#if QEMU_GNUC_PREREQ(4, 1)
    __clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    /* ARM Linux cacheflush system call via swi 0x9f0002 */
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
       
   232 #endif
       
   233 
       
/* Point jump slot 'n' of 'tb' at host address 'addr' by patching the
   generated code.  Each logical jump may have up to two patch sites,
   recorded in tb_jmp_offset[n] and tb_jmp_offset[n + 2]. */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    /* primary patch site, always present */
    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    /* optional second patch site; 0xffff means absent */
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}
       
   245 
       
   246 #else
       
   247 
       
   248 /* set the jump target */
       
   249 static inline void tb_set_jmp_target(TranslationBlock *tb,
       
   250                                      int n, unsigned long addr)
       
   251 {
       
   252     tb->tb_next[n] = addr;
       
   253 }
       
   254 
       
   255 #endif
       
   256 
       
/* Chain jump slot 'n' of 'tb' directly to the code of 'tb_next', and
   register the link on tb_next's reverse-jump list so it can be
   unchained when tb_next is invalidated. */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        /* the two low bits of the stored pointer encode which slot of
           'tb' holds the next link (see the jmp_first comment in
           struct TranslationBlock) */
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
       
   270 
       
   271 TranslationBlock *tb_find_pc(unsigned long pc_ptr);
       
   272 
       
   273 extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
       
   274 extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
       
   275 extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
       
   276 
       
   277 #include "qemu-lock.h"
       
   278 
       
   279 extern spinlock_t tb_lock;
       
   280 
       
   281 extern int tb_invalidated_flag;
       
   282 
       
   283 #if !defined(CONFIG_USER_ONLY)
       
   284 
       
   285 void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
       
   286               void *retaddr);
       
   287 
       
   288 #include "softmmu_defs.h"
       
   289 
       
   290 #define ACCESS_TYPE (NB_MMU_MODES + 1)
       
   291 #define MEMSUFFIX _code
       
   292 #define env cpu_single_env
       
   293 
       
   294 #define DATA_SIZE 1
       
   295 #include "softmmu_header.h"
       
   296 
       
   297 #define DATA_SIZE 2
       
   298 #include "softmmu_header.h"
       
   299 
       
   300 #define DATA_SIZE 4
       
   301 #include "softmmu_header.h"
       
   302 
       
   303 #define DATA_SIZE 8
       
   304 #include "softmmu_header.h"
       
   305 
       
   306 #undef ACCESS_TYPE
       
   307 #undef MEMSUFFIX
       
   308 #undef env
       
   309 
       
   310 #endif
       
   311 
       
   312 #if defined(CONFIG_USER_ONLY)
       
   313 static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
       
   314 {
       
   315     return addr;
       
   316 }
       
   317 #else
       
   318 /* NOTE: this function can trigger an exception */
       
   319 /* NOTE2: the returned address is not exactly the physical address: it
       
   320    is the offset relative to phys_ram_base */
       
static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;

    /* select the software-TLB entry for this virtual page */
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    /* TLB miss for a code fetch: issue a dummy byte load to fill the
       entry; this may raise a guest exception (see NOTE above) */
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    /* the sub-page bits of addr_code carry the IO-memory type */
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    /* executing from device memory (not RAM/ROM) is not supported */
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
        do_unassigned_access(addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    /* addend translates the guest virtual page to a host pointer;
       convert that host pointer back into a RAM offset */
    return ram_offset_from_host((uint8_t *)(addr + env1->tlb_table[mmu_idx][page_index].addend));
}
       
   341 
       
   342 /* Deterministic execution requires that IO only be performed on the last
       
   343    instruction of a TB so that interrupts take effect immediately.  */
       
   344 static inline int can_do_io(CPUState *env)
       
   345 {
       
   346     if (!use_icount)
       
   347         return 1;
       
   348 
       
   349     /* If not executing code then assume we are ok.  */
       
   350     if (!env->current_tb)
       
   351         return 1;
       
   352 
       
   353     return env->can_do_io != 0;
       
   354 }
       
   355 #endif
       
   356 
       
   357 #ifdef USE_KQEMU
       
   358 #define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
       
   359 
       
   360 #define MSR_QPI_COMMBASE 0xfabe0010
       
   361 
       
   362 int kqemu_init(CPUState *env);
       
   363 int kqemu_cpu_exec(CPUState *env);
       
   364 void kqemu_flush_page(CPUState *env, target_ulong addr);
       
   365 void kqemu_flush(CPUState *env, int global);
       
   366 void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
       
   367 void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
       
   368 void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size, 
       
   369                         ram_addr_t phys_offset);
       
   370 void kqemu_cpu_interrupt(CPUState *env);
       
   371 void kqemu_record_dump(void);
       
   372 
       
   373 extern uint32_t kqemu_comm_base;
       
   374 
       
/* Return non-zero if the current x86 CPU state can be run by the kqemu
   accelerator instead of the translator. */
static inline int kqemu_is_ok(CPUState *env)
{
    return(env->kqemu_enabled &&
           (env->cr[0] & CR0_PE_MASK) &&   /* protected mode only */
           !(env->hflags & HF_INHIBIT_IRQ_MASK) && /* not in an interrupt-shadow window */
           (env->eflags & IF_MASK) &&      /* interrupts enabled */
           !(env->eflags & VM_MASK) &&     /* not vm86 mode */
           /* NOTE(review): kqemu_enabled == 2 presumably means kernel
              code may also be accelerated; otherwise only CPL 3 user
              code without full IO privilege — confirm against kqemu docs */
           (env->kqemu_enabled == 2 ||
            ((env->hflags & HF_CPL_MASK) == 3 &&
             (env->eflags & IOPL_MASK) != IOPL_MASK)));
}
       
   386 
       
   387 #endif
       
   388 
       
   389 typedef void (CPUDebugExcpHandler)(CPUState *env);
       
   390 
       
   391 CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
       
   392 #endif