symbian-qemu-0.9.1-12/qemu-symbian-svp/cpu-exec.c
changeset 1 2fb8b9db1c86
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand here, because
       longjmp() restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurred
           before the TB started executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

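/* TB lookup is two-level: tb_find_fast() below first probes
   env->tb_jmp_cache, a direct-mapped cache hashed by virtual PC, and
   falls back to tb_find_slow(), which walks the tb_phys_hash chain
   keyed by physical PC so that translated code can be invalidated when
   the underlying physical page is written, whatever the current
   virtual mappings are. */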
       
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
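    /* hostregs_helper.h is included three times in this function: here
       with DECLARE_HOST_REGS to declare storage for the host globals,
       below with SAVE_HOST_REGS to save them, and at the end (with
       neither macro) to restore them on exit. */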
       
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
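    /* While translated code runs, the arithmetic flags are kept in the
       lazy CC_OP/CC_SRC form and DF is stored as +1/-1 so the string
       instructions can add it directly; eflags is reassembled from
       these pieces on exit, below. */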
       
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
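            /* setjmp() returns 0 when entered directly; cpu_loop_exit()
               and cpu_resume_from_signal() longjmp() back here with 1,
               which falls through to the else branch below before the
               outer loop retries. */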
       
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

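            /* next_tb holds the address of the previously executed TB,
               tagged in its two low bits: 0/1 select which jump slot of
               that TB to patch for direct chaining (see tb_add_jump
               below), while 2 flags an early exit such as instruction
               counter expiry.  A plain 0 means no chaining. */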
       
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
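                /* A successful tb_add_jump() above rewrites the previous
                   TB's jump slot to branch straight to tb->tc_ptr, so
                   chained blocks execute back-to-back without coming back
                   through this lookup until something breaks the chain. */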
       
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

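                /* env->current_tb is cleared again as soon as
                   tcg_qemu_tb_exec() returns, so the while below runs the
                   block at most once; the loop form merely skips execution
                   when the pending-interrupt check above cleared it. */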
       
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet: it can raise an MMU exception where
       NIP != the read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

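/* In real mode and vm86 mode there are no descriptor tables: a segment
   base is simply selector << 4, so the segment cache can be loaded
   directly.  Protected-mode loads go through helper_load_seg(), which
   consults the GDT/LDT and may fault. */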
       
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 && CONFIG_USER_ONLY */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored. */
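/* Returns 1 when the fault was consumed by the emulator (the page was
   unprotected, or the target MMU accepted the access) and 0 when it
   was not an MMU fault at all, in which case the host signal is left
   to take its normal course. */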
       
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

   970 #elif defined (TARGET_MIPS)
       
   971 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
       
   972                                     int is_write, sigset_t *old_set,
       
   973                                     void *puc)
       
   974 {
       
   975     TranslationBlock *tb;
       
   976     int ret;
       
   977 
       
   978     if (cpu_single_env)
       
   979         env = cpu_single_env; /* XXX: find a correct solution for multithread */
       
   980 #if defined(DEBUG_SIGNAL)
       
   981     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
       
   982            pc, address, is_write, *(unsigned long *)old_set);
       
   983 #endif
       
   984     /* XXX: locking issue */
       
   985     if (is_write && page_unprotect(h2g(address), pc, puc)) {
       
   986         return 1;
       
   987     }
       
   988 
       
   989     /* see if it is an MMU fault */
       
   990     ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
       
   991     if (ret < 0)
       
   992         return 0; /* not an MMU fault */
       
   993     if (ret == 0)
       
   994         return 1; /* the MMU fault was handled without causing real CPU fault */
       
   995 
       
   996     /* now we have a real cpu fault */
       
   997     tb = tb_find_pc(pc);
       
   998     if (tb) {
       
   999         /* the PC is inside the translated code. It means that we have
       
  1000            a virtual CPU fault */
       
  1001         cpu_restore_state(tb, env, pc, puc);
       
  1002     }
       
  1003     if (ret == 1) {
       
  1004 #if 0
       
  1005         printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
       
  1006                env->PC, env->error_code, tb);
       
  1007 #endif
       
  1008     /* we restore the process signal mask as the sigreturn should
       
  1009        do it (XXX: use sigsetjmp) */
       
  1010         sigprocmask(SIG_SETMASK, old_set, NULL);
       
  1011         cpu_loop_exit();
       
  1012     } else {
       
  1013         /* activate soft MMU for this block */
       
  1014         cpu_resume_from_signal(env, puc);
       
  1015     }
       
  1016     /* never comes here */
       
  1017     return 1;
       
  1018 }
       
  1019 
       
  1020 #elif defined (TARGET_SH4)
       
  1021 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
       
  1022                                     int is_write, sigset_t *old_set,
       
  1023                                     void *puc)
       
  1024 {
       
  1025     TranslationBlock *tb;
       
  1026     int ret;
       
  1027 
       
  1028     if (cpu_single_env)
       
  1029         env = cpu_single_env; /* XXX: find a correct solution for multithread */
       
  1030 #if defined(DEBUG_SIGNAL)
       
  1031     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
       
  1032            pc, address, is_write, *(unsigned long *)old_set);
       
  1033 #endif
       
  1034     /* XXX: locking issue */
       
  1035     if (is_write && page_unprotect(h2g(address), pc, puc)) {
       
  1036         return 1;
       
  1037     }
       
  1038 
       
  1039     /* see if it is an MMU fault */
       
  1040     ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
       
  1041     if (ret < 0)
       
  1042         return 0; /* not an MMU fault */
       
  1043     if (ret == 0)
       
  1044         return 1; /* the MMU fault was handled without causing real CPU fault */
       
  1045 
       
  1046     /* now we have a real cpu fault */
       
  1047     tb = tb_find_pc(pc);
       
  1048     if (tb) {
       
  1049         /* the PC is inside the translated code. It means that we have
       
  1050            a virtual CPU fault */
       
  1051         cpu_restore_state(tb, env, pc, puc);
       
  1052     }
       
  1053 #if 0
       
  1054         printf("PF exception: NIP=0x%08x error=0x%x %p\n",
       
  1055                env->nip, env->error_code, tb);
       
  1056 #endif
       
  1057     /* we restore the process signal mask as the sigreturn should
       
  1058        do it (XXX: use sigsetjmp) */
       
  1059     sigprocmask(SIG_SETMASK, old_set, NULL);
       
  1060     cpu_loop_exit();
       
  1061     /* never comes here */
       
  1062     return 1;
       
  1063 }
       
  1064 
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never reached */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never reached */
    return 1;
}

#else
#error unsupported target CPU
#endif

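/* What follows are the host-side signal handlers: each one digs the
   faulting PC, the fault address and (where the host makes it cheap to
   recover) a write flag out of the host signal frame, then forwards them
   to handle_cpu_signal() above. */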
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
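    /* Trap 0xe is the x86 page-fault vector; bit 1 of the page-fault
       error code is set when the faulting access was a write. */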
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg)	(uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)		_UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg)	(uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)		QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
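    /* Same page-fault decode as in the i386 handler above. */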
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)		((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)		REG_sig(gpr[reg_num], context)
# define IAR_sig(context)			REG_sig(nip, context)	/* Program counter */
# define MSR_sig(context)			REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)			REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)			REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)			REG_sig(link, context) /* Link register */
# define CR_sig(context)			REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)		(((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)			(*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)			REG_sig(dar, context)
# define DSISR_sig(context)			REG_sig(dsisr, context)
# define TRAP_sig(context)			REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)		((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)	((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)	((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)		((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)		REG_sig(r##reg_num, context)
# define IAR_sig(context)			REG_sig(srr0, context)	/* Program counter */
# define MSR_sig(context)			REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)			REG_sig(ctr, context)
# define XER_sig(context)			REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)			REG_sig(lr, context)  /* Link register */
# define CR_sig(context)			REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)		FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)			((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)			EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)			EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)			EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
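    /* DSISR bit 0x02000000 flags a store access; vector 0x400 is the
       instruction storage interrupt, for which DSISR is not meaningful. */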
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

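    /* Bits 31:26 of an Alpha instruction are the opcode field; the cases
       below are the integer and floating-point store opcodes. */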
    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
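    /* op (insn bits 31:30) == 3 selects the SPARC load/store format;
       op3 (bits 24:19) then picks the individual memory opcode. */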
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

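    /* Old glibc (<= 2.3) exposed the ARM registers as a gregs[] array,
       in which slot R15 holds the PC; newer glibc uses named arm_* fields. */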
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID	1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

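    /* On s390 the program status word (PSW) holds the instruction
       address at the time of the fault. */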
    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */
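/* For illustration only (not part of this file's build): a handler of this
   shape is what the user-mode emulation code is expected to install so
   that cpu_signal_handler() above sees host SIGSEGVs.  The names below are
   invented for the sketch; only sigaction()/SA_SIGINFO (from <signal.h>),
   memset() (from <string.h>) and cpu_signal_handler() are real.

       static void host_sigsegv(int sig, siginfo_t *info, void *puc)
       {
           if (cpu_signal_handler(sig, info, puc))
               return;      // fault resolved, resume the guest
           abort();         // genuine host-side fault
       }

       void install_host_fault_handler(void)
       {
           struct sigaction act;
           memset(&act, 0, sizeof(act));
           act.sa_sigaction = host_sigsegv;
           act.sa_flags = SA_SIGINFO;
           sigaction(SIGSEGV, &act, NULL);
       }
*/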