symbian-qemu-0.9.1-12/qemu-symbian-svp/target-arm/translate.c
       
     1 /*
       
     2  *  ARM translation
       
     3  *
       
     4  *  Copyright (c) 2003 Fabrice Bellard
       
     5  *  Copyright (c) 2005-2007 CodeSourcery
       
     6  *  Copyright (c) 2007 OpenedHand, Ltd.
       
     7  *
       
     8  * This library is free software; you can redistribute it and/or
       
     9  * modify it under the terms of the GNU Lesser General Public
       
    10  * License as published by the Free Software Foundation; either
       
    11  * version 2 of the License, or (at your option) any later version.
       
    12  *
       
    13  * This library is distributed in the hope that it will be useful,
       
    14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       
    16  * Lesser General Public License for more details.
       
    17  *
       
    18  * You should have received a copy of the GNU Lesser General Public
       
    19  * License along with this library; if not, write to the Free Software
       
    20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
       
    21  */
       
    22 #include <stdarg.h>
       
    23 #include <stdlib.h>
       
    24 #include <stdio.h>
       
    25 #include <string.h>
       
    26 #include <inttypes.h>
       
    27 
       
    28 #include "cpu.h"
       
    29 #include "exec-all.h"
       
    30 #include "disas.h"
       
    31 #include "tcg-op.h"
       
    32 #include "qemu-log.h"
       
    33 
       
    34 #include "helpers.h"
       
    35 #define GEN_HELPER 1
       
    36 #include "helpers.h"
       
    37 
       
    38 #define ENABLE_ARCH_5J    0
       
    39 #define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
       
    40 #define ENABLE_ARCH_6K   arm_feature(env, ARM_FEATURE_V6K)
       
    41 #define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
       
    42 #define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)
       
    43 
       
    44 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
       
    45 
       
    46 /* internal defines */
       
    47 typedef struct DisasContext {
       
    48     target_ulong pc;
       
    49     int is_jmp;
       
    50     /* Nonzero if this instruction has been conditionally skipped.  */
       
    51     int condjmp;
       
    52     /* The label that will be jumped to when the instruction is skipped.  */
       
    53     int condlabel;
       
    54     /* Thumb-2 conditional execution bits.  */
       
    55     int condexec_mask;
       
    56     int condexec_cond;
       
    57     struct TranslationBlock *tb;
       
    58     int singlestep_enabled;
       
    59     int thumb;
       
    60 #if !defined(CONFIG_USER_ONLY)
       
    61     int user;
       
    62 #endif
       
    63 } DisasContext;
       
    64 
       
    65 #if defined(CONFIG_USER_ONLY)
       
    66 #define IS_USER(s) 1
       
    67 #else
       
    68 #define IS_USER(s) (s->user)
       
    69 #endif
       
    70 
       
    71 /* These instructions trap after executing, so defer them until after the
       
    72    conditional execution state has been updated.  */
       
    73 #define DISAS_WFI 4
       
    74 #define DISAS_SWI 5
       
    75 
       
    76 static TCGv_ptr cpu_env;
       
    77 /* We reuse the same 64-bit temporaries for efficiency.  */
       
    78 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
       
    79 
       
    80 /* FIXME:  These should be removed.  */
       
    81 static TCGv cpu_T[2];
       
    82 static TCGv cpu_F0s, cpu_F1s;
       
    83 static TCGv_i64 cpu_F0d, cpu_F1d;
       
    84 
       
    85 #define ICOUNT_TEMP cpu_T[0]
       
    86 #include "gen-icount.h"
       
    87 
       
    88 /* initialize TCG globals.  */
       
    89 void arm_translate_init(void)
       
    90 {
       
    91     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
       
    92 
       
    93     cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
       
    94     cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
       
    95 
       
    96 #define GEN_HELPER 2
       
    97 #include "helpers.h"
       
    98 }
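       /*
        * Note on the repeated "helpers.h" inclusions: the header is written
        * to expand differently depending on GEN_HELPER.  Presumably
        * (following the usual QEMU convention of this era) the plain include
        * declares the helper prototypes, GEN_HELPER 1 emits the gen_helper_*
        * wrappers that generate TCG call ops, and GEN_HELPER 2 (inside
        * arm_translate_init) registers the helpers with the TCG runtime.
        */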
       
    99 
       
   100 /* The code generator doesn't like lots of temporaries, so maintain our own
       
   101    cache for reuse within a function.  */
       
   102 #define MAX_TEMPS 8
       
   103 static int num_temps;
       
   104 static TCGv temps[MAX_TEMPS];
       
   105 
       
   106 /* Allocate a temporary variable.  */
       
   107 static TCGv_i32 new_tmp(void)
       
   108 {
       
   109     TCGv tmp;
       
   110     if (num_temps == MAX_TEMPS)
       
   111         abort();
       
   112 
       
   113     if (GET_TCGV_I32(temps[num_temps]))
       
   114         return temps[num_temps++];
       
   115 
       
   116     tmp = tcg_temp_new_i32();
       
   117     temps[num_temps++] = tmp;
       
   118     return tmp;
       
   119 }
       
   120 
       
   121 /* Release a temporary variable.  */
       
   122 static void dead_tmp(TCGv tmp)
       
   123 {
       
   124     int i;
       
   125     num_temps--;
       
   126     i = num_temps;
       
   127     if (TCGV_EQUAL(temps[i], tmp))
       
   128         return;
       
   129 
       
   130     /* Shuffle this temp to the last slot.  */
       
   131     while (!TCGV_EQUAL(temps[i], tmp))
       
   132         i--;
       
   133     while (i < num_temps) {
       
   134         temps[i] = temps[i + 1];
       
   135         i++;
       
   136     }
       
   137     temps[i] = tmp;
       
   138 }
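       /*
        * The temps[] array behaves like a small stack of reusable TCG
        * temporaries: new_tmp() hands back the temporary already created at
        * the current depth (creating one only the first time that depth is
        * reached), and dead_tmp() pops it.  When temporaries are freed out
        * of LIFO order, dead_tmp() shuffles the freed one into the top slot
        * so the stack stays dense.
        */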
       
   139 
       
   140 static inline TCGv load_cpu_offset(int offset)
       
   141 {
       
   142     TCGv tmp = new_tmp();
       
   143     tcg_gen_ld_i32(tmp, cpu_env, offset);
       
   144     return tmp;
       
   145 }
       
   146 
       
   147 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
       
   148 
       
   149 static inline void store_cpu_offset(TCGv var, int offset)
       
   150 {
       
   151     tcg_gen_st_i32(var, cpu_env, offset);
       
   152     dead_tmp(var);
       
   153 }
       
   154 
       
   155 #define store_cpu_field(var, name) \
       
   156     store_cpu_offset(var, offsetof(CPUState, name))
       
   157 
       
   158 /* Set a variable to the value of a CPU register.  */
       
   159 static void load_reg_var(DisasContext *s, TCGv var, int reg)
       
   160 {
       
   161     if (reg == 15) {
       
   162         uint32_t addr;
       
   163         /* normally, since we have already updated the PC, we only need to add the length of one insn */
       
   164         if (s->thumb)
       
   165             addr = (long)s->pc + 2;
       
   166         else
       
   167             addr = (long)s->pc + 4;
       
   168         tcg_gen_movi_i32(var, addr);
       
   169     } else {
       
   170         tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
       
   171     }
       
   172 }
       
   173 
       
   174 /* Create a new temporary and set it to the value of a CPU register.  */
       
   175 static inline TCGv load_reg(DisasContext *s, int reg)
       
   176 {
       
   177     TCGv tmp = new_tmp();
       
   178     load_reg_var(s, tmp, reg);
       
   179     return tmp;
       
   180 }
       
   181 
       
   182 /* Set a CPU register.  The source must be a temporary and will be
       
   183    marked as dead.  */
       
   184 static void store_reg(DisasContext *s, int reg, TCGv var)
       
   185 {
       
   186     if (reg == 15) {
       
   187         tcg_gen_andi_i32(var, var, ~1);
       
   188         s->is_jmp = DISAS_JUMP;
       
   189     }
       
   190     tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
       
   191     dead_tmp(var);
       
   192 }
       
   193 
       
   194 
       
   195 /* Basic operations.  */
       
   196 #define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
       
   197 #define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
       
   198 #define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
       
   199 
       
   200 #define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
       
   201 #define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
       
   202 #define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
       
   203 #define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
       
   204 
       
   205 #define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
       
   206 #define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
       
   207 #define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
       
   208 #define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
       
   209 #define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
       
   210 #define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
       
   211 
       
   212 #define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
       
   213 #define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
       
   214 #define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
       
   215 #define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
       
   216 #define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
       
   217 #define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
       
   218 #define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
       
   219 
       
   220 #define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
       
   221 #define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
       
   222 
       
   223 /* Value extensions.  */
       
   224 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
       
   225 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
       
   226 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
       
   227 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
       
   228 
       
   229 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
       
   230 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
       
   231 
       
   232 #define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
       
   233 
       
   234 #define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
       
   235 /* Set NZCV flags from the high 4 bits of var.  */
       
   236 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
       
   237 
       
   238 static void gen_exception(int excp)
       
   239 {
       
   240     TCGv tmp = new_tmp();
       
   241     tcg_gen_movi_i32(tmp, excp);
       
   242     gen_helper_exception(tmp);
       
   243     dead_tmp(tmp);
       
   244 }
       
   245 
       
   246 static void gen_smul_dual(TCGv a, TCGv b)
       
   247 {
       
   248     TCGv tmp1 = new_tmp();
       
   249     TCGv tmp2 = new_tmp();
       
   250     tcg_gen_ext16s_i32(tmp1, a);
       
   251     tcg_gen_ext16s_i32(tmp2, b);
       
   252     tcg_gen_mul_i32(tmp1, tmp1, tmp2);
       
   253     dead_tmp(tmp2);
       
   254     tcg_gen_sari_i32(a, a, 16);
       
   255     tcg_gen_sari_i32(b, b, 16);
       
   256     tcg_gen_mul_i32(b, b, a);
       
   257     tcg_gen_mov_i32(a, tmp1);
       
   258     dead_tmp(tmp1);
       
   259 }
       
   260 
       
   261 /* Byteswap each halfword.  */
       
   262 static void gen_rev16(TCGv var)
       
   263 {
       
   264     TCGv tmp = new_tmp();
       
   265     tcg_gen_shri_i32(tmp, var, 8);
       
   266     tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
       
   267     tcg_gen_shli_i32(var, var, 8);
       
   268     tcg_gen_andi_i32(var, var, 0xff00ff00);
       
   269     tcg_gen_or_i32(var, var, tmp);
       
   270     dead_tmp(tmp);
       
   271 }
       
   272 
       
   273 /* Byteswap low halfword and sign extend.  */
       
   274 static void gen_revsh(TCGv var)
       
   275 {
       
   276     TCGv tmp = new_tmp();
       
   277     tcg_gen_shri_i32(tmp, var, 8);
       
   278     tcg_gen_andi_i32(tmp, tmp, 0x00ff);
       
   279     tcg_gen_shli_i32(var, var, 8);
       
   280     tcg_gen_ext16s_i32(var, var);
       
   281     tcg_gen_or_i32(var, var, tmp);
       
   282     dead_tmp(tmp);
       
   283 }
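       /*
        * Worked example for the two byte-swap helpers above:
        *   gen_rev16: 0x11223344 -> 0x22114433 (each halfword byteswapped).
        *   gen_revsh: 0x00001180 -> 0xffff8011 (low halfword byteswapped,
        *   then sign-extended from bit 15, which is the original bit 7).
        */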
       
   284 
       
   285 /* Unsigned bitfield extract.  */
       
   286 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
       
   287 {
       
   288     if (shift)
       
   289         tcg_gen_shri_i32(var, var, shift);
       
   290     tcg_gen_andi_i32(var, var, mask);
       
   291 }
       
   292 
       
   293 /* Signed bitfield extract.  */
       
   294 static void gen_sbfx(TCGv var, int shift, int width)
       
   295 {
       
   296     uint32_t signbit;
       
   297 
       
   298     if (shift)
       
   299         tcg_gen_sari_i32(var, var, shift);
       
   300     if (shift + width < 32) {
       
   301         signbit = 1u << (width - 1);
       
   302         tcg_gen_andi_i32(var, var, (1u << width) - 1);
       
   303         tcg_gen_xori_i32(var, var, signbit);
       
   304         tcg_gen_subi_i32(var, var, signbit);
       
   305     }
       
   306 }
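       /*
        * The xor/sub pair above is the classic branch-free sign extension of
        * a width-bit field: after masking, xor-ing with the sign bit and then
        * subtracting it leaves non-negative values unchanged and turns values
        * with the sign bit set into their negative two's-complement form.
        * E.g. with width = 8: 0xff -> xor 0x80 -> 0x7f -> minus 0x80 ->
        * 0xffffffff (-1), while 0x7f -> xor 0x80 -> 0xff -> minus 0x80 ->
        * 0x7f stays +127.
        */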
       
   307 
       
   308 /* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
       
   309 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
       
   310 {
       
   311     tcg_gen_andi_i32(val, val, mask);
       
   312     tcg_gen_shli_i32(val, val, shift);
       
   313     tcg_gen_andi_i32(base, base, ~(mask << shift));
       
   314     tcg_gen_or_i32(dest, base, val);
       
   315 }
       
   316 
       
   317 /* Round the top 32 bits of a 64-bit value.  */
       
   318 static void gen_roundqd(TCGv a, TCGv b)
       
   319 {
       
   320     tcg_gen_shri_i32(a, a, 31);
       
   321     tcg_gen_add_i32(a, a, b);
       
   322 }
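       /*
        * Here a holds the low word and b the high word of the 64-bit value:
        * adding bit 31 of the low word (the rounding bit) to the high word
        * leaves the rounded top 32 bits in a.
        */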
       
   323 
       
   324 /* FIXME: Most targets have native widening multiplication.
       
   325    It would be good to use that instead of a full wide multiply.  */
       
   326 /* 32x32->64 multiply.  Marks inputs as dead.  */
       
   327 static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
       
   328 {
       
   329     TCGv_i64 tmp1 = tcg_temp_new_i64();
       
   330     TCGv_i64 tmp2 = tcg_temp_new_i64();
       
   331 
       
   332     tcg_gen_extu_i32_i64(tmp1, a);
       
   333     dead_tmp(a);
       
   334     tcg_gen_extu_i32_i64(tmp2, b);
       
   335     dead_tmp(b);
       
   336     tcg_gen_mul_i64(tmp1, tmp1, tmp2);
       
   337     return tmp1;
       
   338 }
       
   339 
       
   340 static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
       
   341 {
       
   342     TCGv_i64 tmp1 = tcg_temp_new_i64();
       
   343     TCGv_i64 tmp2 = tcg_temp_new_i64();
       
   344 
       
   345     tcg_gen_ext_i32_i64(tmp1, a);
       
   346     dead_tmp(a);
       
   347     tcg_gen_ext_i32_i64(tmp2, b);
       
   348     dead_tmp(b);
       
   349     tcg_gen_mul_i64(tmp1, tmp1, tmp2);
       
   350     return tmp1;
       
   351 }
       
   352 
       
   353 /* Unsigned 32x32->64 multiply.  */
       
   354 static void gen_op_mull_T0_T1(void)
       
   355 {
       
   356     TCGv_i64 tmp1 = tcg_temp_new_i64();
       
   357     TCGv_i64 tmp2 = tcg_temp_new_i64();
       
   358 
       
   359     tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
       
   360     tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
       
   361     tcg_gen_mul_i64(tmp1, tmp1, tmp2);
       
   362     tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
       
   363     tcg_gen_shri_i64(tmp1, tmp1, 32);
       
   364     tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
       
   365 }
       
   366 
       
   367 /* Signed 32x32->64 multiply.  */
       
   368 static void gen_imull(TCGv a, TCGv b)
       
   369 {
       
   370     TCGv_i64 tmp1 = tcg_temp_new_i64();
       
   371     TCGv_i64 tmp2 = tcg_temp_new_i64();
       
   372 
       
   373     tcg_gen_ext_i32_i64(tmp1, a);
       
   374     tcg_gen_ext_i32_i64(tmp2, b);
       
   375     tcg_gen_mul_i64(tmp1, tmp1, tmp2);
       
   376     tcg_gen_trunc_i64_i32(a, tmp1);
       
   377     tcg_gen_shri_i64(tmp1, tmp1, 32);
       
   378     tcg_gen_trunc_i64_i32(b, tmp1);
       
   379 }
       
   380 
       
   381 /* Swap low and high halfwords.  */
       
   382 static void gen_swap_half(TCGv var)
       
   383 {
       
   384     TCGv tmp = new_tmp();
       
   385     tcg_gen_shri_i32(tmp, var, 16);
       
   386     tcg_gen_shli_i32(var, var, 16);
       
   387     tcg_gen_or_i32(var, var, tmp);
       
   388     dead_tmp(tmp);
       
   389 }
       
   390 
       
   391 /* Dual 16-bit add.  The result is placed in t0; t1 is marked as dead.
       
   392     tmp = (t0 ^ t1) & 0x8000;
       
   393     t0 &= ~0x8000;
       
   394     t1 &= ~0x8000;
       
   395     t0 = (t0 + t1) ^ tmp;
       
   396  */
       
   397 
       
   398 static void gen_add16(TCGv t0, TCGv t1)
       
   399 {
       
   400     TCGv tmp = new_tmp();
       
   401     tcg_gen_xor_i32(tmp, t0, t1);
       
   402     tcg_gen_andi_i32(tmp, tmp, 0x8000);
       
   403     tcg_gen_andi_i32(t0, t0, ~0x8000);
       
   404     tcg_gen_andi_i32(t1, t1, ~0x8000);
       
   405     tcg_gen_add_i32(t0, t0, t1);
       
   406     tcg_gen_xor_i32(t0, t0, tmp);
       
   407     dead_tmp(tmp);
       
   408     dead_tmp(t1);
       
   409 }
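       /*
        * The masking trick above performs two independent 16-bit additions
        * in one 32-bit add: clearing bit 15 of both operands stops any carry
        * from propagating out of the low halfword (so the two halves stay
        * independent), and the saved xor of the original bit-15 values
        * restores the correct bit 15 of the low-halfword result.
        */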
       
   410 
       
   411 #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
       
   412 
       
   413 /* Set CF to the top bit of var.  */
       
   414 static void gen_set_CF_bit31(TCGv var)
       
   415 {
       
   416     TCGv tmp = new_tmp();
       
   417     tcg_gen_shri_i32(tmp, var, 31);
       
   418     gen_set_CF(tmp);
       
   419     dead_tmp(tmp);
       
   420 }
       
   421 
       
   422 /* Set N and Z flags from var.  */
       
   423 static inline void gen_logic_CC(TCGv var)
       
   424 {
       
   425     tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
       
   426     tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
       
   427 }
       
   428 
       
   429 /* T0 += T1 + CF.  */
       
   430 static void gen_adc_T0_T1(void)
       
   431 {
       
   432     TCGv tmp;
       
   433     gen_op_addl_T0_T1();
       
   434     tmp = load_cpu_field(CF);
       
   435     tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
       
   436     dead_tmp(tmp);
       
   437 }
       
   438 
       
   439 /* dest = T0 - T1 + CF - 1.  */
       
   440 static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
       
   441 {
       
   442     TCGv tmp;
       
   443     tcg_gen_sub_i32(dest, t0, t1);
       
   444     tmp = load_cpu_field(CF);
       
   445     tcg_gen_add_i32(dest, dest, tmp);
       
   446     tcg_gen_subi_i32(dest, dest, 1);
       
   447     dead_tmp(tmp);
       
   448 }
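       /*
        * This is the ARM subtract-with-carry: the borrow is the inverse of
        * the carry flag, so SBC computes t0 - t1 - (1 - CF), i.e.
        * t0 - t1 + CF - 1 as noted above.
        */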
       
   449 
       
   450 #define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
       
   451 #define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
       
   452 
       
   453 /* dest = t0 & ~t1 (the gen_op_bicl_T0_T1 form below clobbers T1).  */
       
   454 /* FIXME: Implement bic natively.  */
       
   455 static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
       
   456 {
       
   457     TCGv tmp = new_tmp();
       
   458     tcg_gen_not_i32(tmp, t1);
       
   459     tcg_gen_and_i32(dest, t0, tmp);
       
   460     dead_tmp(tmp);
       
   461 }
       
   462 static inline void gen_op_bicl_T0_T1(void)
       
   463 {
       
   464     gen_op_notl_T1();
       
   465     gen_op_andl_T0_T1();
       
   466 }
       
   467 
       
   468 /* FIXME:  Implement this natively.  */
       
   469 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
       
   470 
       
   471 /* FIXME:  Implement this natively.  */
       
   472 static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
       
   473 {
       
   474     TCGv tmp;
       
   475 
       
   476     if (i == 0)
       
   477         return;
       
   478 
       
   479     tmp = new_tmp();
       
   480     tcg_gen_shri_i32(tmp, t1, i);
       
   481     tcg_gen_shli_i32(t1, t1, 32 - i);
       
   482     tcg_gen_or_i32(t0, t1, tmp);
       
   483     dead_tmp(tmp);
       
   484 }
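       /*
        * Rotate-right built from two shifts and an or.  Note that for i == 0
        * nothing is written to t0, so callers are expected to pass the same
        * variable for t0 and t1 (as gen_arm_shift_im does below).
        */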
       
   485 
       
   486 static void shifter_out_im(TCGv var, int shift)
       
   487 {
       
   488     TCGv tmp = new_tmp();
       
   489     if (shift == 0) {
       
   490         tcg_gen_andi_i32(tmp, var, 1);
       
   491     } else {
       
   492         tcg_gen_shri_i32(tmp, var, shift);
       
   493         if (shift != 31)
       
   494             tcg_gen_andi_i32(tmp, tmp, 1);
       
   495     }
       
   496     gen_set_CF(tmp);
       
   497     dead_tmp(tmp);
       
   498 }
       
   499 
       
   500 /* Shift by immediate.  Includes special handling for shift == 0.  */
       
   501 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
       
   502 {
       
   503     switch (shiftop) {
       
   504     case 0: /* LSL */
       
   505         if (shift != 0) {
       
   506             if (flags)
       
   507                 shifter_out_im(var, 32 - shift);
       
   508             tcg_gen_shli_i32(var, var, shift);
       
   509         }
       
   510         break;
       
   511     case 1: /* LSR */
       
   512         if (shift == 0) {
       
   513             if (flags) {
       
   514                 tcg_gen_shri_i32(var, var, 31);
       
   515                 gen_set_CF(var);
       
   516             }
       
   517             tcg_gen_movi_i32(var, 0);
       
   518         } else {
       
   519             if (flags)
       
   520                 shifter_out_im(var, shift - 1);
       
   521             tcg_gen_shri_i32(var, var, shift);
       
   522         }
       
   523         break;
       
   524     case 2: /* ASR */
       
   525         if (shift == 0)
       
   526             shift = 32;
       
   527         if (flags)
       
   528             shifter_out_im(var, shift - 1);
       
   529         if (shift == 32)
       
   530             shift = 31;
       
   531         tcg_gen_sari_i32(var, var, shift);
       
   532         break;
       
   533     case 3: /* ROR/RRX */
       
   534         if (shift != 0) {
       
   535             if (flags)
       
   536                 shifter_out_im(var, shift - 1);
       
   537             tcg_gen_rori_i32(var, var, shift); break;
       
   538         } else {
       
   539             TCGv tmp = load_cpu_field(CF);
       
   540             if (flags)
       
   541                 shifter_out_im(var, 0);
       
   542             tcg_gen_shri_i32(var, var, 1);
       
   543             tcg_gen_shli_i32(tmp, tmp, 31);
       
   544             tcg_gen_or_i32(var, var, tmp);
       
   545             dead_tmp(tmp);
       
   546         }
       
   547     }
       
   548 }
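       /*
        * The shift == 0 special cases above follow the ARM immediate shift
        * encodings: LSR #0 encodes LSR #32, ASR #0 encodes ASR #32, and
        * ROR #0 encodes RRX (rotate right by one through the carry flag).
        * LSL #0 is a plain move and leaves the carry untouched.
        */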
       
   549 
       
   550 static inline void gen_arm_shift_reg(TCGv var, int shiftop,
       
   551                                      TCGv shift, int flags)
       
   552 {
       
   553     if (flags) {
       
   554         switch (shiftop) {
       
   555         case 0: gen_helper_shl_cc(var, var, shift); break;
       
   556         case 1: gen_helper_shr_cc(var, var, shift); break;
       
   557         case 2: gen_helper_sar_cc(var, var, shift); break;
       
   558         case 3: gen_helper_ror_cc(var, var, shift); break;
       
   559         }
       
   560     } else {
       
   561         switch (shiftop) {
       
   562         case 0: gen_helper_shl(var, var, shift); break;
       
   563         case 1: gen_helper_shr(var, var, shift); break;
       
   564         case 2: gen_helper_sar(var, var, shift); break;
       
   565         case 3: gen_helper_ror(var, var, shift); break;
       
   566         }
       
   567     }
       
   568     dead_tmp(shift);
       
   569 }
       
   570 
       
   571 #define PAS_OP(pfx) \
       
   572     switch (op2) {  \
       
   573     case 0: gen_pas_helper(glue(pfx,add16)); break; \
       
   574     case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
       
   575     case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
       
   576     case 3: gen_pas_helper(glue(pfx,sub16)); break; \
       
   577     case 4: gen_pas_helper(glue(pfx,add8)); break; \
       
   578     case 7: gen_pas_helper(glue(pfx,sub8)); break; \
       
   579     }
       
   580 static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
       
   581 {
       
   582     TCGv_ptr tmp;
       
   583 
       
   584     switch (op1) {
       
   585 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
       
   586     case 1:
       
   587         tmp = tcg_temp_new_ptr();
       
   588         tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
       
   589         PAS_OP(s)
       
   590         break;
       
   591     case 5:
       
   592         tmp = tcg_temp_new_ptr();
       
   593         tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
       
   594         PAS_OP(u)
       
   595         break;
       
   596 #undef gen_pas_helper
       
   597 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
       
   598     case 2:
       
   599         PAS_OP(q);
       
   600         break;
       
   601     case 3:
       
   602         PAS_OP(sh);
       
   603         break;
       
   604     case 6:
       
   605         PAS_OP(uq);
       
   606         break;
       
   607     case 7:
       
   608         PAS_OP(uh);
       
   609         break;
       
   610 #undef gen_pas_helper
       
   611     }
       
   612 }
       
   613 #undef PAS_OP
       
   614 
       
   615 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
       
   616 #define PAS_OP(pfx) \
       
   617     switch (op2) {  \
       
   618     case 0: gen_pas_helper(glue(pfx,add8)); break; \
       
   619     case 1: gen_pas_helper(glue(pfx,add16)); break; \
       
   620     case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
       
   621     case 4: gen_pas_helper(glue(pfx,sub8)); break; \
       
   622     case 5: gen_pas_helper(glue(pfx,sub16)); break; \
       
   623     case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
       
   624     }
       
   625 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
       
   626 {
       
   627     TCGv_ptr tmp;
       
   628 
       
   629     switch (op1) {
       
   630 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
       
   631     case 0:
       
   632         tmp = tcg_temp_new_ptr();
       
   633         tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
       
   634         PAS_OP(s)
       
   635         break;
       
   636     case 4:
       
   637         tmp = tcg_temp_new_ptr();
       
   638         tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
       
   639         PAS_OP(u)
       
   640         break;
       
   641 #undef gen_pas_helper
       
   642 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
       
   643     case 1:
       
   644         PAS_OP(q);
       
   645         break;
       
   646     case 2:
       
   647         PAS_OP(sh);
       
   648         break;
       
   649     case 5:
       
   650         PAS_OP(uq);
       
   651         break;
       
   652     case 6:
       
   653         PAS_OP(uh);
       
   654         break;
       
   655 #undef gen_pas_helper
       
   656     }
       
   657 }
       
   658 #undef PAS_OP
       
   659 
       
   660 static void gen_test_cc(int cc, int label)
       
   661 {
       
   662     TCGv tmp;
       
   663     TCGv tmp2;
       
   664     int inv;
       
   665 
       
   666     switch (cc) {
       
   667     case 0: /* eq: Z */
       
   668         tmp = load_cpu_field(ZF);
       
   669         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
       
   670         break;
       
   671     case 1: /* ne: !Z */
       
   672         tmp = load_cpu_field(ZF);
       
   673         tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
       
   674         break;
       
   675     case 2: /* cs: C */
       
   676         tmp = load_cpu_field(CF);
       
   677         tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
       
   678         break;
       
   679     case 3: /* cc: !C */
       
   680         tmp = load_cpu_field(CF);
       
   681         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
       
   682         break;
       
   683     case 4: /* mi: N */
       
   684         tmp = load_cpu_field(NF);
       
   685         tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
       
   686         break;
       
   687     case 5: /* pl: !N */
       
   688         tmp = load_cpu_field(NF);
       
   689         tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
       
   690         break;
       
   691     case 6: /* vs: V */
       
   692         tmp = load_cpu_field(VF);
       
   693         tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
       
   694         break;
       
   695     case 7: /* vc: !V */
       
   696         tmp = load_cpu_field(VF);
       
   697         tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
       
   698         break;
       
   699     case 8: /* hi: C && !Z */
       
   700         inv = gen_new_label();
       
   701         tmp = load_cpu_field(CF);
       
   702         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
       
   703         dead_tmp(tmp);
       
   704         tmp = load_cpu_field(ZF);
       
   705         tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
       
   706         gen_set_label(inv);
       
   707         break;
       
   708     case 9: /* ls: !C || Z */
       
   709         tmp = load_cpu_field(CF);
       
   710         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
       
   711         dead_tmp(tmp);
       
   712         tmp = load_cpu_field(ZF);
       
   713         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
       
   714         break;
       
   715     case 10: /* ge: N == V -> N ^ V == 0 */
       
   716         tmp = load_cpu_field(VF);
       
   717         tmp2 = load_cpu_field(NF);
       
   718         tcg_gen_xor_i32(tmp, tmp, tmp2);
       
   719         dead_tmp(tmp2);
       
   720         tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
       
   721         break;
       
   722     case 11: /* lt: N != V -> N ^ V != 0 */
       
   723         tmp = load_cpu_field(VF);
       
   724         tmp2 = load_cpu_field(NF);
       
   725         tcg_gen_xor_i32(tmp, tmp, tmp2);
       
   726         dead_tmp(tmp2);
       
   727         tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
       
   728         break;
       
   729     case 12: /* gt: !Z && N == V */
       
   730         inv = gen_new_label();
       
   731         tmp = load_cpu_field(ZF);
       
   732         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
       
   733         dead_tmp(tmp);
       
   734         tmp = load_cpu_field(VF);
       
   735         tmp2 = load_cpu_field(NF);
       
   736         tcg_gen_xor_i32(tmp, tmp, tmp2);
       
   737         dead_tmp(tmp2);
       
   738         tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
       
   739         gen_set_label(inv);
       
   740         break;
       
   741     case 13: /* le: Z || N != V */
       
   742         tmp = load_cpu_field(ZF);
       
   743         tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
       
   744         dead_tmp(tmp);
       
   745         tmp = load_cpu_field(VF);
       
   746         tmp2 = load_cpu_field(NF);
       
   747         tcg_gen_xor_i32(tmp, tmp, tmp2);
       
   748         dead_tmp(tmp2);
       
   749         tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
       
   750         break;
       
   751     default:
       
   752         fprintf(stderr, "Bad condition code 0x%x\n", cc);
       
   753         abort();
       
   754     }
       
   755     dead_tmp(tmp);
       
   756 }
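       /*
        * The comparisons above rely on how the flags are kept in CPUState:
        * CF holds 0 or 1, NF and VF hold a value whose bit 31 is the flag,
        * and ZF holds a value that is zero exactly when the Z flag is set.
        * Hence "eq" branches when ZF == 0 and "mi" branches when NF < 0.
        */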
       
   757 
       
   758 static const uint8_t table_logic_cc[16] = {
       
   759     1, /* and */
       
   760     1, /* xor */
       
   761     0, /* sub */
       
   762     0, /* rsb */
       
   763     0, /* add */
       
   764     0, /* adc */
       
   765     0, /* sbc */
       
   766     0, /* rsc */
       
   767     1, /* tst */

   768     1, /* teq */
       
   769     0, /* cmp */
       
   770     0, /* cmn */
       
   771     1, /* orr */
       
   772     1, /* mov */
       
   773     1, /* bic */
       
   774     1, /* mvn */
       
   775 };
       
   776 
       
   777 /* Set PC and Thumb state from an immediate address.  */
       
   778 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
       
   779 {
       
   780     TCGv tmp;
       
   781 
       
   782     s->is_jmp = DISAS_UPDATE;
       
   783     tmp = new_tmp();
       
   784     if (s->thumb != (addr & 1)) {
       
   785         tcg_gen_movi_i32(tmp, addr & 1);
       
   786         tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
       
   787     }
       
   788     tcg_gen_movi_i32(tmp, addr & ~1);
       
   789     tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
       
   790     dead_tmp(tmp);
       
   791 }
       
   792 
       
   793 /* Set PC and Thumb state from var.  var is marked as dead.  */
       
   794 static inline void gen_bx(DisasContext *s, TCGv var)
       
   795 {
       
   796     TCGv tmp;
       
   797 
       
   798     s->is_jmp = DISAS_UPDATE;
       
   799     tmp = new_tmp();
       
   800     tcg_gen_andi_i32(tmp, var, 1);
       
   801     store_cpu_field(tmp, thumb);
       
   802     tcg_gen_andi_i32(var, var, ~1);
       
   803     store_cpu_field(var, regs[15]);
       
   804 }
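       /*
        * BX-style interworking: bit 0 of the target address selects the
        * instruction set (1 = Thumb) and is cleared before the value is
        * written to regs[15].
        */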
       
   805 
       
   806 /* TODO: This should be removed.  Use gen_bx instead.  */
       
   807 static inline void gen_bx_T0(DisasContext *s)
       
   808 {
       
   809     TCGv tmp = new_tmp();
       
   810     tcg_gen_mov_i32(tmp, cpu_T[0]);
       
   811     gen_bx(s, tmp);
       
   812 }
       
   813 
       
   814 static inline TCGv gen_ld8s(TCGv addr, int index)
       
   815 {
       
   816     TCGv tmp = new_tmp();
       
   817     tcg_gen_qemu_ld8s(tmp, addr, index);
       
   818     return tmp;
       
   819 }
       
   820 static inline TCGv gen_ld8u(TCGv addr, int index)
       
   821 {
       
   822     TCGv tmp = new_tmp();
       
   823     tcg_gen_qemu_ld8u(tmp, addr, index);
       
   824     return tmp;
       
   825 }
       
   826 static inline TCGv gen_ld16s(TCGv addr, int index)
       
   827 {
       
   828     TCGv tmp = new_tmp();
       
   829     tcg_gen_qemu_ld16s(tmp, addr, index);
       
   830     return tmp;
       
   831 }
       
   832 static inline TCGv gen_ld16u(TCGv addr, int index)
       
   833 {
       
   834     TCGv tmp = new_tmp();
       
   835     tcg_gen_qemu_ld16u(tmp, addr, index);
       
   836     return tmp;
       
   837 }
       
   838 static inline TCGv gen_ld32(TCGv addr, int index)
       
   839 {
       
   840     TCGv tmp = new_tmp();
       
   841     tcg_gen_qemu_ld32u(tmp, addr, index);
       
   842     return tmp;
       
   843 }
       
   844 static inline void gen_st8(TCGv val, TCGv addr, int index)
       
   845 {
       
   846     tcg_gen_qemu_st8(val, addr, index);
       
   847     dead_tmp(val);
       
   848 }
       
   849 static inline void gen_st16(TCGv val, TCGv addr, int index)
       
   850 {
       
   851     tcg_gen_qemu_st16(val, addr, index);
       
   852     dead_tmp(val);
       
   853 }
       
   854 static inline void gen_st32(TCGv val, TCGv addr, int index)
       
   855 {
       
   856     tcg_gen_qemu_st32(val, addr, index);
       
   857     dead_tmp(val);
       
   858 }
       
   859 
       
   860 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
       
   861 {
       
   862     load_reg_var(s, cpu_T[0], reg);
       
   863 }
       
   864 
       
   865 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
       
   866 {
       
   867     load_reg_var(s, cpu_T[1], reg);
       
   868 }
       
   869 
       
   870 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
       
   871 {
       
   872     load_reg_var(s, cpu_T[2], reg);
       
   873 }
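       /*
        * NB: cpu_T is declared with only two entries (line 81 above), so the
        * cpu_T[2] reference in this helper indexes past the end of the array;
        * it looks like unused legacy code kept from an older three-temporary
        * scheme.
        */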
       
   874 
       
   875 static inline void gen_set_pc_im(uint32_t val)
       
   876 {
       
   877     TCGv tmp = new_tmp();
       
   878     tcg_gen_movi_i32(tmp, val);
       
   879     store_cpu_field(tmp, regs[15]);
       
   880 }
       
   881 
       
   882 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
       
   883 {
       
   884     TCGv tmp;
       
   885     if (reg == 15) {
       
   886         tmp = new_tmp();
       
   887         tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
       
   888     } else {
       
   889         tmp = cpu_T[t];
       
   890     }
       
   891     tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
       
   892     if (reg == 15) {
       
   893         dead_tmp(tmp);
       
   894         s->is_jmp = DISAS_JUMP;
       
   895     }
       
   896 }
       
   897 
       
   898 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
       
   899 {
       
   900     gen_movl_reg_TN(s, reg, 0);
       
   901 }
       
   902 
       
   903 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
       
   904 {
       
   905     gen_movl_reg_TN(s, reg, 1);
       
   906 }
       
   907 
       
   908 /* Force a TB lookup after an instruction that changes the CPU state.  */
       
   909 static inline void gen_lookup_tb(DisasContext *s)
       
   910 {
       
   911     gen_op_movl_T0_im(s->pc);
       
   912     gen_movl_reg_T0(s, 15);
       
   913     s->is_jmp = DISAS_UPDATE;
       
   914 }
       
   915 
       
   916 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
       
   917                                        TCGv var)
       
   918 {
       
   919     int val, rm, shift, shiftop;
       
   920     TCGv offset;
       
   921 
       
   922     if (!(insn & (1 << 25))) {
       
   923         /* immediate */
       
   924         val = insn & 0xfff;
       
   925         if (!(insn & (1 << 23)))
       
   926             val = -val;
       
   927         if (val != 0)
       
   928             tcg_gen_addi_i32(var, var, val);
       
   929     } else {
       
   930         /* shift/register */
       
   931         rm = (insn) & 0xf;
       
   932         shift = (insn >> 7) & 0x1f;
       
   933         shiftop = (insn >> 5) & 3;
       
   934         offset = load_reg(s, rm);
       
   935         gen_arm_shift_im(offset, shiftop, shift, 0);
       
   936         if (!(insn & (1 << 23)))
       
   937             tcg_gen_sub_i32(var, var, offset);
       
   938         else
       
   939             tcg_gen_add_i32(var, var, offset);
       
   940         dead_tmp(offset);
       
   941     }
       
   942 }
       
   943 
       
   944 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
       
   945                                         int extra, TCGv var)
       
   946 {
       
   947     int val, rm;
       
   948     TCGv offset;
       
   949 
       
   950     if (insn & (1 << 22)) {
       
   951         /* immediate */
       
   952         val = (insn & 0xf) | ((insn >> 4) & 0xf0);
       
   953         if (!(insn & (1 << 23)))
       
   954             val = -val;
       
   955         val += extra;
       
   956         if (val != 0)
       
   957             tcg_gen_addi_i32(var, var, val);
       
   958     } else {
       
   959         /* register */
       
   960         if (extra)
       
   961             tcg_gen_addi_i32(var, var, extra);
       
   962         rm = (insn) & 0xf;
       
   963         offset = load_reg(s, rm);
       
   964         if (!(insn & (1 << 23)))
       
   965             tcg_gen_sub_i32(var, var, offset);
       
   966         else
       
   967             tcg_gen_add_i32(var, var, offset);
       
   968         dead_tmp(offset);
       
   969     }
       
   970 }
       
   971 
       
   972 #define VFP_OP2(name)                                                 \
       
   973 static inline void gen_vfp_##name(int dp)                             \
       
   974 {                                                                     \
       
   975     if (dp)                                                           \
       
   976         gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
       
   977     else                                                              \
       
   978         gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
       
   979 }
       
   980 
       
   981 VFP_OP2(add)
       
   982 VFP_OP2(sub)
       
   983 VFP_OP2(mul)
       
   984 VFP_OP2(div)
       
   985 
       
   986 #undef VFP_OP2
       
   987 
       
   988 static inline void gen_vfp_abs(int dp)
       
   989 {
       
   990     if (dp)
       
   991         gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
       
   992     else
       
   993         gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
       
   994 }
       
   995 
       
   996 static inline void gen_vfp_neg(int dp)
       
   997 {
       
   998     if (dp)
       
   999         gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
       
  1000     else
       
  1001         gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
       
  1002 }
       
  1003 
       
  1004 static inline void gen_vfp_sqrt(int dp)
       
  1005 {
       
  1006     if (dp)
       
  1007         gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
       
  1008     else
       
  1009         gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
       
  1010 }
       
  1011 
       
  1012 static inline void gen_vfp_cmp(int dp)
       
  1013 {
       
  1014     if (dp)
       
  1015         gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
       
  1016     else
       
  1017         gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
       
  1018 }
       
  1019 
       
  1020 static inline void gen_vfp_cmpe(int dp)
       
  1021 {
       
  1022     if (dp)
       
  1023         gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
       
  1024     else
       
  1025         gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
       
  1026 }
       
  1027 
       
  1028 static inline void gen_vfp_F1_ld0(int dp)
       
  1029 {
       
  1030     if (dp)
       
  1031         tcg_gen_movi_i64(cpu_F1d, 0);
       
  1032     else
       
  1033         tcg_gen_movi_i32(cpu_F1s, 0);
       
  1034 }
       
  1035 
       
  1036 static inline void gen_vfp_uito(int dp)
       
  1037 {
       
  1038     if (dp)
       
  1039         gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
       
  1040     else
       
  1041         gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
       
  1042 }
       
  1043 
       
  1044 static inline void gen_vfp_sito(int dp)
       
  1045 {
       
  1046     if (dp)
       
  1047         gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
       
  1048     else
       
  1049         gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
       
  1050 }
       
  1051 
       
  1052 static inline void gen_vfp_toui(int dp)
       
  1053 {
       
  1054     if (dp)
       
  1055         gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
       
  1056     else
       
  1057         gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
       
  1058 }
       
  1059 
       
  1060 static inline void gen_vfp_touiz(int dp)
       
  1061 {
       
  1062     if (dp)
       
  1063         gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
       
  1064     else
       
  1065         gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
       
  1066 }
       
  1067 
       
  1068 static inline void gen_vfp_tosi(int dp)
       
  1069 {
       
  1070     if (dp)
       
  1071         gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
       
  1072     else
       
  1073         gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
       
  1074 }
       
  1075 
       
  1076 static inline void gen_vfp_tosiz(int dp)
       
  1077 {
       
  1078     if (dp)
       
  1079         gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
       
  1080     else
       
  1081         gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
       
  1082 }
       
  1083 
       
  1084 #define VFP_GEN_FIX(name) \
       
  1085 static inline void gen_vfp_##name(int dp, int shift) \
       
  1086 { \
       
  1087     if (dp) \
       
  1088         gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
       
  1089     else \
       
  1090         gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
       
  1091 }
       
  1092 VFP_GEN_FIX(tosh)
       
  1093 VFP_GEN_FIX(tosl)
       
  1094 VFP_GEN_FIX(touh)
       
  1095 VFP_GEN_FIX(toul)
       
  1096 VFP_GEN_FIX(shto)
       
  1097 VFP_GEN_FIX(slto)
       
  1098 VFP_GEN_FIX(uhto)
       
  1099 VFP_GEN_FIX(ulto)
       
  1100 #undef VFP_GEN_FIX
       
  1101 
       
  1102 static inline void gen_vfp_ld(DisasContext *s, int dp)
       
  1103 {
       
  1104     if (dp) {
       
  1105         /* VFP memory accesses only require word alignment, so split
       
  1106            doubleword accesses.  */
       
  1107         TCGv low = new_tmp();
       
  1108         TCGv high = new_tmp();
       
  1109         tcg_gen_qemu_ld32u(low, cpu_T[1], IS_USER(s));
       
  1110         tcg_gen_addi_i32(high, cpu_T[1], 4);
       
  1111         tcg_gen_qemu_ld32u(high, high, IS_USER(s));
       
  1112         tcg_gen_concat_i32_i64(cpu_F0d, low, high);
       
  1113         dead_tmp(high);
       
  1114         dead_tmp(low);
       
  1115     } else {
       
  1116         tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
       
  1117     }
       
  1118 }
       
  1119 
       
  1120 static inline void gen_vfp_st(DisasContext *s, int dp)
       
  1121 {
       
  1122     if (dp) {
       
  1123         /* VFP memory accesses only require word alignment, so split
       
  1124            doubleword accesses.  */
       
  1125         TCGv val = new_tmp();
       
  1126         TCGv addr = new_tmp();
       
  1127         tcg_gen_trunc_i64_i32(val, cpu_F0d);
       
  1128         tcg_gen_qemu_st32(val, cpu_T[1], IS_USER(s));
       
  1129         tcg_gen_addi_i32(addr, cpu_T[1], 4);
       
  1130         tcg_gen_shri_i64(cpu_F0d, cpu_F0d, 32);
       
  1131         tcg_gen_trunc_i64_i32(val, cpu_F0d);
       
  1132         tcg_gen_qemu_st32(val, addr, IS_USER(s));
       
  1133         dead_tmp(addr);
       
  1134         dead_tmp(val);
       
  1135     } else {
       
  1136         tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
       
  1137     }
       
  1138 }
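       /*
        * Both VFP doubleword access helpers above split the 64-bit transfer
        * into two 32-bit accesses (low word at the given address, high word
        * at address + 4), since only word alignment is guaranteed.
        */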
       
  1139 
       
  1140 static inline long
       
  1141 vfp_reg_offset (int dp, int reg)
       
  1142 {
       
  1143     if (dp)
       
  1144         return offsetof(CPUARMState, vfp.regs[reg]);
       
  1145     else if (reg & 1) {
       
  1146         return offsetof(CPUARMState, vfp.regs[reg >> 1])
       
  1147           + offsetof(CPU_DoubleU, l.upper);
       
  1148     } else {
       
  1149         return offsetof(CPUARMState, vfp.regs[reg >> 1])
       
  1150           + offsetof(CPU_DoubleU, l.lower);
       
  1151     }
       
  1152 }
       
  1153 
       
  1154 /* Return the offset of a 32-bit piece of a NEON register.
       
  1155    zero is the least significant end of the register.  */
       
  1156 static inline long
       
  1157 neon_reg_offset (int reg, int n)
       
  1158 {
       
  1159     int sreg;
       
  1160     sreg = reg * 2 + n;
       
  1161     return vfp_reg_offset(0, sreg);
       
  1162 }
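       /*
        * Single-precision VFP/NEON registers alias halves of the doubles:
        * S(2n) is the low word of D(n) and S(2n+1) the high word, which is
        * what vfp_reg_offset() computes for dp == 0.  neon_reg_offset(reg, n)
        * therefore returns the offset of the n-th 32-bit half of D-register
        * reg, with n == 0 the least significant half.
        */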
       
  1163 
       
  1164 /* FIXME: Remove these.  */
       
  1165 #define neon_T0 cpu_T[0]
       
  1166 #define neon_T1 cpu_T[1]
       
  1167 #define NEON_GET_REG(T, reg, n) \
       
  1168   tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
       
  1169 #define NEON_SET_REG(T, reg, n) \
       
  1170   tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
       
  1171 
       
  1172 static TCGv neon_load_reg(int reg, int pass)
       
  1173 {
       
  1174     TCGv tmp = new_tmp();
       
  1175     tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
       
  1176     return tmp;
       
  1177 }
       
  1178 
       
  1179 static void neon_store_reg(int reg, int pass, TCGv var)
       
  1180 {
       
  1181     tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
       
  1182     dead_tmp(var);
       
  1183 }
       
  1184 
       
  1185 static inline void neon_load_reg64(TCGv_i64 var, int reg)
       
  1186 {
       
  1187     tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
       
  1188 }
       
  1189 
       
  1190 static inline void neon_store_reg64(TCGv_i64 var, int reg)
       
  1191 {
       
  1192     tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
       
  1193 }
       
  1194 
       
  1195 #define tcg_gen_ld_f32 tcg_gen_ld_i32
       
  1196 #define tcg_gen_ld_f64 tcg_gen_ld_i64
       
  1197 #define tcg_gen_st_f32 tcg_gen_st_i32
       
  1198 #define tcg_gen_st_f64 tcg_gen_st_i64
       
  1199 
       
  1200 static inline void gen_mov_F0_vreg(int dp, int reg)
       
  1201 {
       
  1202     if (dp)
       
  1203         tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
       
  1204     else
       
  1205         tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
       
  1206 }
       
  1207 
       
  1208 static inline void gen_mov_F1_vreg(int dp, int reg)
       
  1209 {
       
  1210     if (dp)
       
  1211         tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
       
  1212     else
       
  1213         tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
       
  1214 }
       
  1215 
       
  1216 static inline void gen_mov_vreg_F0(int dp, int reg)
       
  1217 {
       
  1218     if (dp)
       
  1219         tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
       
  1220     else
       
  1221         tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
       
  1222 }
       
  1223 
       
  1224 #define ARM_CP_RW_BIT	(1 << 20)
       
  1225 
       
  1226 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
       
  1227 {
       
  1228     tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
       
  1229 }
       
  1230 
       
  1231 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
       
  1232 {
       
  1233     tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
       
  1234 }
       
  1235 
       
  1236 static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
       
  1237 {
       
  1238     tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
       
  1239 }
       
  1240 
       
  1241 static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
       
  1242 {
       
  1243     tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
       
  1244 }
       
  1245 
       
  1246 static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
       
  1247 {
       
  1248     tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
       
  1249 }
       
  1250 
       
  1251 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
       
  1252 {
       
  1253     iwmmxt_store_reg(cpu_M0, rn);
       
  1254 }
       
  1255 
       
  1256 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
       
  1257 {
       
  1258     iwmmxt_load_reg(cpu_M0, rn);
       
  1259 }
       
  1260 
       
  1261 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
       
  1262 {
       
  1263     iwmmxt_load_reg(cpu_V1, rn);
       
  1264     tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
       
  1265 }
       
  1266 
       
  1267 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
       
  1268 {
       
  1269     iwmmxt_load_reg(cpu_V1, rn);
       
  1270     tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
       
  1271 }
       
  1272 
       
  1273 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
       
  1274 {
       
  1275     iwmmxt_load_reg(cpu_V1, rn);
       
  1276     tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
       
  1277 }
       
  1278 
       
  1279 #define IWMMXT_OP(name) \
       
  1280 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
       
  1281 { \
       
  1282     iwmmxt_load_reg(cpu_V1, rn); \
       
  1283     gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
       
  1284 }
       
  1285 
       
  1286 #define IWMMXT_OP_ENV(name) \
       
  1287 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
       
  1288 { \
       
  1289     iwmmxt_load_reg(cpu_V1, rn); \
       
  1290     gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
       
  1291 }
       
  1292 
       
  1293 #define IWMMXT_OP_ENV_SIZE(name) \
       
  1294 IWMMXT_OP_ENV(name##b) \
       
  1295 IWMMXT_OP_ENV(name##w) \
       
  1296 IWMMXT_OP_ENV(name##l)
       
  1297 
       
  1298 #define IWMMXT_OP_ENV1(name) \
       
  1299 static inline void gen_op_iwmmxt_##name##_M0(void) \
       
  1300 { \
       
  1301     gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
       
  1302 }
       
  1303 
       
  1304 IWMMXT_OP(maddsq)
       
  1305 IWMMXT_OP(madduq)
       
  1306 IWMMXT_OP(sadb)
       
  1307 IWMMXT_OP(sadw)
       
  1308 IWMMXT_OP(mulslw)
       
  1309 IWMMXT_OP(mulshw)
       
  1310 IWMMXT_OP(mululw)
       
  1311 IWMMXT_OP(muluhw)
       
  1312 IWMMXT_OP(macsw)
       
  1313 IWMMXT_OP(macuw)
       
  1314 
       
  1315 IWMMXT_OP_ENV_SIZE(unpackl)
       
  1316 IWMMXT_OP_ENV_SIZE(unpackh)
       
  1317 
       
  1318 IWMMXT_OP_ENV1(unpacklub)
       
  1319 IWMMXT_OP_ENV1(unpackluw)
       
  1320 IWMMXT_OP_ENV1(unpacklul)
       
  1321 IWMMXT_OP_ENV1(unpackhub)
       
  1322 IWMMXT_OP_ENV1(unpackhuw)
       
  1323 IWMMXT_OP_ENV1(unpackhul)
       
  1324 IWMMXT_OP_ENV1(unpacklsb)
       
  1325 IWMMXT_OP_ENV1(unpacklsw)
       
  1326 IWMMXT_OP_ENV1(unpacklsl)
       
  1327 IWMMXT_OP_ENV1(unpackhsb)
       
  1328 IWMMXT_OP_ENV1(unpackhsw)
       
  1329 IWMMXT_OP_ENV1(unpackhsl)
       
  1330 
       
  1331 IWMMXT_OP_ENV_SIZE(cmpeq)
       
  1332 IWMMXT_OP_ENV_SIZE(cmpgtu)
       
  1333 IWMMXT_OP_ENV_SIZE(cmpgts)
       
  1334 
       
  1335 IWMMXT_OP_ENV_SIZE(mins)
       
  1336 IWMMXT_OP_ENV_SIZE(minu)
       
  1337 IWMMXT_OP_ENV_SIZE(maxs)
       
  1338 IWMMXT_OP_ENV_SIZE(maxu)
       
  1339 
       
  1340 IWMMXT_OP_ENV_SIZE(subn)
       
  1341 IWMMXT_OP_ENV_SIZE(addn)
       
  1342 IWMMXT_OP_ENV_SIZE(subu)
       
  1343 IWMMXT_OP_ENV_SIZE(addu)
       
  1344 IWMMXT_OP_ENV_SIZE(subs)
       
  1345 IWMMXT_OP_ENV_SIZE(adds)
       
  1346 
       
  1347 IWMMXT_OP_ENV(avgb0)
       
  1348 IWMMXT_OP_ENV(avgb1)
       
  1349 IWMMXT_OP_ENV(avgw0)
       
  1350 IWMMXT_OP_ENV(avgw1)
       
  1351 
       
  1352 IWMMXT_OP(msadb)
       
  1353 
       
  1354 IWMMXT_OP_ENV(packuw)
       
  1355 IWMMXT_OP_ENV(packul)
       
  1356 IWMMXT_OP_ENV(packuq)
       
  1357 IWMMXT_OP_ENV(packsw)
       
  1358 IWMMXT_OP_ENV(packsl)
       
  1359 IWMMXT_OP_ENV(packsq)
       
  1360 
       
  1361 static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
       
  1362 {
       
  1363     gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
       
  1364 }
       
  1365 
       
  1366 static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
       
  1367 {
       
  1368     gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
       
  1369 }
       
  1370 
       
  1371 static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
       
  1372 {
       
  1373     gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
       
  1374 }
       
  1375 
       
  1376 static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
       
  1377 {
       
  1378     iwmmxt_load_reg(cpu_V1, rn);
       
  1379     gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
       
  1380 }
       
  1381 
       
  1382 static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
       
  1383 {
       
  1384     TCGv tmp = tcg_const_i32(shift);
       
  1385     gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
       
  1386 }
       
  1387 
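        /* TEXTRM helpers: shift the selected element of M0 down to bit 0 and
           sign- or zero-extend it into T0. */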
       
  1388 static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
       
  1389 {
       
  1390     tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
       
  1391     tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
       
  1392     tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
       
  1393 }
       
  1394 
       
  1395 static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
       
  1396 {
       
  1397     tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
       
  1398     tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
       
  1399     tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
       
  1400 }
       
  1401 
       
  1402 static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
       
  1403 {
       
  1404     tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
       
  1405     tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
       
  1406     if (mask != ~0u)
       
  1407         tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
       
  1408 }
       
  1409 
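        /* wCon bookkeeping: from the way these are used below, bit 1 (MUP)
           records an update to a wMMX data register and bit 0 (CUP) an update
           to the control/flag state. */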
       
  1410 static void gen_op_iwmmxt_set_mup(void)
       
  1411 {
       
  1412     TCGv tmp;
       
  1413     tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
       
  1414     tcg_gen_ori_i32(tmp, tmp, 2);
       
  1415     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
       
  1416 }
       
  1417 
       
  1418 static void gen_op_iwmmxt_set_cup(void)
       
  1419 {
       
  1420     TCGv tmp;
       
  1421     tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
       
  1422     tcg_gen_ori_i32(tmp, tmp, 1);
       
  1423     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
       
  1424 }
       
  1425 
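        /* Compute the N and Z SIMD flags for the 64-bit value in M0 and store
           them in wCASF. */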
       
  1426 static void gen_op_iwmmxt_setpsr_nz(void)
       
  1427 {
       
  1428     TCGv tmp = new_tmp();
       
  1429     gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
       
  1430     store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
       
  1431 }
       
  1432 
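        /* Add the low 32 bits of wRn (zero-extended) into M0; used by the WSAD
           accumulate step below. */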
       
  1433 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
       
  1434 {
       
  1435     iwmmxt_load_reg(cpu_V1, rn);
       
  1436     tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
       
  1437     tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
       
  1438 }
       
  1439 
       
  1440 
       
  1441 static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
       
  1442 {
       
  1443     iwmmxt_load_reg(cpu_V0, rn);
       
  1444     tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
       
  1445     tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
       
  1446     tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
       
  1447 }
       
  1448 
       
  1449 static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
       
  1450 {
       
  1451     tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
       
  1452     iwmmxt_store_reg(cpu_V0, rn);
       
  1453 }
       
  1454 
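        /* Decode the addressing mode of an iwMMXt load/store: leaves the
           effective address in T1, performs any base register writeback, and
           returns nonzero for the invalid encoding with neither pre- nor
           post-indexing and the U bit clear. */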
       
  1455 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
       
  1456 {
       
  1457     int rd;
       
  1458     uint32_t offset;
       
  1459 
       
  1460     rd = (insn >> 16) & 0xf;
       
  1461     gen_movl_T1_reg(s, rd);
       
  1462 
       
  1463     offset = (insn & 0xff) << ((insn >> 7) & 2);
       
  1464     if (insn & (1 << 24)) {
       
  1465         /* Pre indexed */
       
  1466         if (insn & (1 << 23))
       
  1467             gen_op_addl_T1_im(offset);
       
  1468         else
       
  1469             gen_op_addl_T1_im(-offset);
       
  1470 
       
  1471         if (insn & (1 << 21))
       
  1472             gen_movl_reg_T1(s, rd);
       
  1473     } else if (insn & (1 << 21)) {
       
  1474         /* Post indexed */
       
  1475         if (insn & (1 << 23))
       
  1476             gen_op_movl_T0_im(offset);
       
  1477         else
       
   1478             gen_op_movl_T0_im(-offset);
       
  1479         gen_op_addl_T0_T1();
       
  1480         gen_movl_reg_T0(s, rd);
       
  1481     } else if (!(insn & (1 << 23)))
       
  1482         return 1;
       
  1483     return 0;
       
  1484 }
       
  1485 
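        /* Fetch a shift amount into T0, either from one of the wCGR control
           registers (bit 8 set) or from the low half of a wR register, and
           mask it with the given limit.  Returns nonzero for an invalid
           control register selection. */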
       
  1486 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
       
  1487 {
       
  1488     int rd = (insn >> 0) & 0xf;
       
  1489 
       
  1490     if (insn & (1 << 8))
       
  1491         if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
       
  1492             return 1;
       
  1493         else
       
  1494             gen_op_iwmmxt_movl_T0_wCx(rd);
       
  1495     else
       
  1496         gen_iwmmxt_movl_T0_T1_wRn(rd);
       
  1497 
       
  1498     gen_op_movl_T1_im(mask);
       
  1499     gen_op_andl_T0_T1();
       
  1500     return 0;
       
  1501 }
       
  1502 
       
   1503 /* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred

   1504    (i.e. an undefined instruction).  */
       
  1505 static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
       
  1506 {
       
  1507     int rd, wrd;
       
  1508     int rdhi, rdlo, rd0, rd1, i;
       
  1509     TCGv tmp;
       
  1510 
       
  1511     if ((insn & 0x0e000e00) == 0x0c000000) {
       
  1512         if ((insn & 0x0fe00ff0) == 0x0c400000) {
       
  1513             wrd = insn & 0xf;
       
  1514             rdlo = (insn >> 12) & 0xf;
       
  1515             rdhi = (insn >> 16) & 0xf;
       
  1516             if (insn & ARM_CP_RW_BIT) {			/* TMRRC */
       
  1517                 gen_iwmmxt_movl_T0_T1_wRn(wrd);
       
  1518                 gen_movl_reg_T0(s, rdlo);
       
  1519                 gen_movl_reg_T1(s, rdhi);
       
  1520             } else {					/* TMCRR */
       
  1521                 gen_movl_T0_reg(s, rdlo);
       
  1522                 gen_movl_T1_reg(s, rdhi);
       
  1523                 gen_iwmmxt_movl_wRn_T0_T1(wrd);
       
  1524                 gen_op_iwmmxt_set_mup();
       
  1525             }
       
  1526             return 0;
       
  1527         }
       
  1528 
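                /* iwMMXt load/store (WLDR/WSTR): compute the address into T1,
                   then transfer 8, 16, 32 or 64 bits between memory and wRd
                   (or wCd for the control-register form). */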
       
  1529         wrd = (insn >> 12) & 0xf;
       
  1530         if (gen_iwmmxt_address(s, insn))
       
  1531             return 1;
       
  1532         if (insn & ARM_CP_RW_BIT) {
       
  1533             if ((insn >> 28) == 0xf) {			/* WLDRW wCx */
       
  1534                 tmp = gen_ld32(cpu_T[1], IS_USER(s));
       
  1535                 tcg_gen_mov_i32(cpu_T[0], tmp);
       
  1536                 dead_tmp(tmp);
       
  1537                 gen_op_iwmmxt_movl_wCx_T0(wrd);
       
  1538             } else {
       
  1539                 i = 1;
       
  1540                 if (insn & (1 << 8)) {
       
  1541                     if (insn & (1 << 22)) {		/* WLDRD */
       
  1542                         tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
       
  1543                         i = 0;
       
  1544                     } else {				/* WLDRW wRd */
       
  1545                         tmp = gen_ld32(cpu_T[1], IS_USER(s));
       
  1546                     }
       
  1547                 } else {
       
  1548                     if (insn & (1 << 22)) {		/* WLDRH */
       
  1549                         tmp = gen_ld16u(cpu_T[1], IS_USER(s));
       
  1550                     } else {				/* WLDRB */
       
  1551                         tmp = gen_ld8u(cpu_T[1], IS_USER(s));
       
  1552                     }
       
  1553                 }
       
  1554                 if (i) {
       
  1555                     tcg_gen_extu_i32_i64(cpu_M0, tmp);
       
  1556                     dead_tmp(tmp);
       
  1557                 }
       
  1558                 gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1559             }
       
  1560         } else {
       
  1561             if ((insn >> 28) == 0xf) {			/* WSTRW wCx */
       
  1562                 gen_op_iwmmxt_movl_T0_wCx(wrd);
       
  1563                 tmp = new_tmp();
       
  1564                 tcg_gen_mov_i32(tmp, cpu_T[0]);
       
  1565                 gen_st32(tmp, cpu_T[1], IS_USER(s));
       
  1566             } else {
       
  1567                 gen_op_iwmmxt_movq_M0_wRn(wrd);
       
  1568                 tmp = new_tmp();
       
  1569                 if (insn & (1 << 8)) {
       
  1570                     if (insn & (1 << 22)) {		/* WSTRD */
       
  1571                         dead_tmp(tmp);
       
  1572                         tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
       
  1573                     } else {				/* WSTRW wRd */
       
  1574                         tcg_gen_trunc_i64_i32(tmp, cpu_M0);
       
  1575                         gen_st32(tmp, cpu_T[1], IS_USER(s));
       
  1576                     }
       
  1577                 } else {
       
  1578                     if (insn & (1 << 22)) {		/* WSTRH */
       
  1579                         tcg_gen_trunc_i64_i32(tmp, cpu_M0);
       
  1580                         gen_st16(tmp, cpu_T[1], IS_USER(s));
       
  1581                     } else {				/* WSTRB */
       
  1582                         tcg_gen_trunc_i64_i32(tmp, cpu_M0);
       
  1583                         gen_st8(tmp, cpu_T[1], IS_USER(s));
       
  1584                     }
       
  1585                 }
       
  1586             }
       
  1587         }
       
  1588         return 0;
       
  1589     }
       
  1590 
       
  1591     if ((insn & 0x0f000000) != 0x0e000000)
       
  1592         return 1;
       
  1593 
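            /* Remaining iwMMXt instructions: dispatch on a key built from
               insn[23:20] (bits 11:8 of the key) and insn[11:4] (bits 7:0). */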
       
  1594     switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
       
  1595     case 0x000:						/* WOR */
       
  1596         wrd = (insn >> 12) & 0xf;
       
  1597         rd0 = (insn >> 0) & 0xf;
       
  1598         rd1 = (insn >> 16) & 0xf;
       
  1599         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1600         gen_op_iwmmxt_orq_M0_wRn(rd1);
       
  1601         gen_op_iwmmxt_setpsr_nz();
       
  1602         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1603         gen_op_iwmmxt_set_mup();
       
  1604         gen_op_iwmmxt_set_cup();
       
  1605         break;
       
  1606     case 0x011:						/* TMCR */
       
  1607         if (insn & 0xf)
       
  1608             return 1;
       
  1609         rd = (insn >> 12) & 0xf;
       
  1610         wrd = (insn >> 16) & 0xf;
       
  1611         switch (wrd) {
       
  1612         case ARM_IWMMXT_wCID:
       
  1613         case ARM_IWMMXT_wCASF:
       
  1614             break;
       
  1615         case ARM_IWMMXT_wCon:
       
  1616             gen_op_iwmmxt_set_cup();
       
  1617             /* Fall through.  */
       
  1618         case ARM_IWMMXT_wCSSF:
       
  1619             gen_op_iwmmxt_movl_T0_wCx(wrd);
       
  1620             gen_movl_T1_reg(s, rd);
       
  1621             gen_op_bicl_T0_T1();
       
  1622             gen_op_iwmmxt_movl_wCx_T0(wrd);
       
  1623             break;
       
  1624         case ARM_IWMMXT_wCGR0:
       
  1625         case ARM_IWMMXT_wCGR1:
       
  1626         case ARM_IWMMXT_wCGR2:
       
  1627         case ARM_IWMMXT_wCGR3:
       
  1628             gen_op_iwmmxt_set_cup();
       
   1629             gen_movl_T0_reg(s, rd);    /* TMCR: move the ARM register into T0 before writing wCx */
       
  1630             gen_op_iwmmxt_movl_wCx_T0(wrd);
       
  1631             break;
       
  1632         default:
       
  1633             return 1;
       
  1634         }
       
  1635         break;
       
  1636     case 0x100:						/* WXOR */
       
  1637         wrd = (insn >> 12) & 0xf;
       
  1638         rd0 = (insn >> 0) & 0xf;
       
  1639         rd1 = (insn >> 16) & 0xf;
       
  1640         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1641         gen_op_iwmmxt_xorq_M0_wRn(rd1);
       
  1642         gen_op_iwmmxt_setpsr_nz();
       
  1643         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1644         gen_op_iwmmxt_set_mup();
       
  1645         gen_op_iwmmxt_set_cup();
       
  1646         break;
       
  1647     case 0x111:						/* TMRC */
       
  1648         if (insn & 0xf)
       
  1649             return 1;
       
  1650         rd = (insn >> 12) & 0xf;
       
  1651         wrd = (insn >> 16) & 0xf;
       
  1652         gen_op_iwmmxt_movl_T0_wCx(wrd);
       
  1653         gen_movl_reg_T0(s, rd);
       
  1654         break;
       
  1655     case 0x300:						/* WANDN */
       
  1656         wrd = (insn >> 12) & 0xf;
       
  1657         rd0 = (insn >> 0) & 0xf;
       
  1658         rd1 = (insn >> 16) & 0xf;
       
  1659         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
   1660         tcg_gen_not_i64(cpu_M0, cpu_M0);    /* WANDN is a bitwise AND-NOT, so complement rather than negate */
       
  1661         gen_op_iwmmxt_andq_M0_wRn(rd1);
       
  1662         gen_op_iwmmxt_setpsr_nz();
       
  1663         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1664         gen_op_iwmmxt_set_mup();
       
  1665         gen_op_iwmmxt_set_cup();
       
  1666         break;
       
  1667     case 0x200:						/* WAND */
       
  1668         wrd = (insn >> 12) & 0xf;
       
  1669         rd0 = (insn >> 0) & 0xf;
       
  1670         rd1 = (insn >> 16) & 0xf;
       
  1671         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1672         gen_op_iwmmxt_andq_M0_wRn(rd1);
       
  1673         gen_op_iwmmxt_setpsr_nz();
       
  1674         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1675         gen_op_iwmmxt_set_mup();
       
  1676         gen_op_iwmmxt_set_cup();
       
  1677         break;
       
  1678     case 0x810: case 0xa10:				/* WMADD */
       
  1679         wrd = (insn >> 12) & 0xf;
       
  1680         rd0 = (insn >> 0) & 0xf;
       
  1681         rd1 = (insn >> 16) & 0xf;
       
  1682         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1683         if (insn & (1 << 21))
       
  1684             gen_op_iwmmxt_maddsq_M0_wRn(rd1);
       
  1685         else
       
  1686             gen_op_iwmmxt_madduq_M0_wRn(rd1);
       
  1687         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1688         gen_op_iwmmxt_set_mup();
       
  1689         break;
       
  1690     case 0x10e: case 0x50e: case 0x90e: case 0xd0e:	/* WUNPCKIL */
       
  1691         wrd = (insn >> 12) & 0xf;
       
  1692         rd0 = (insn >> 16) & 0xf;
       
  1693         rd1 = (insn >> 0) & 0xf;
       
  1694         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1695         switch ((insn >> 22) & 3) {
       
  1696         case 0:
       
  1697             gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
       
  1698             break;
       
  1699         case 1:
       
  1700             gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
       
  1701             break;
       
  1702         case 2:
       
  1703             gen_op_iwmmxt_unpackll_M0_wRn(rd1);
       
  1704             break;
       
  1705         case 3:
       
  1706             return 1;
       
  1707         }
       
  1708         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1709         gen_op_iwmmxt_set_mup();
       
  1710         gen_op_iwmmxt_set_cup();
       
  1711         break;
       
  1712     case 0x10c: case 0x50c: case 0x90c: case 0xd0c:	/* WUNPCKIH */
       
  1713         wrd = (insn >> 12) & 0xf;
       
  1714         rd0 = (insn >> 16) & 0xf;
       
  1715         rd1 = (insn >> 0) & 0xf;
       
  1716         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1717         switch ((insn >> 22) & 3) {
       
  1718         case 0:
       
  1719             gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
       
  1720             break;
       
  1721         case 1:
       
  1722             gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
       
  1723             break;
       
  1724         case 2:
       
  1725             gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
       
  1726             break;
       
  1727         case 3:
       
  1728             return 1;
       
  1729         }
       
  1730         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1731         gen_op_iwmmxt_set_mup();
       
  1732         gen_op_iwmmxt_set_cup();
       
  1733         break;
       
  1734     case 0x012: case 0x112: case 0x412: case 0x512:	/* WSAD */
       
  1735         wrd = (insn >> 12) & 0xf;
       
  1736         rd0 = (insn >> 16) & 0xf;
       
  1737         rd1 = (insn >> 0) & 0xf;
       
  1738         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1739         if (insn & (1 << 22))
       
  1740             gen_op_iwmmxt_sadw_M0_wRn(rd1);
       
  1741         else
       
  1742             gen_op_iwmmxt_sadb_M0_wRn(rd1);
       
  1743         if (!(insn & (1 << 20)))
       
  1744             gen_op_iwmmxt_addl_M0_wRn(wrd);
       
  1745         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1746         gen_op_iwmmxt_set_mup();
       
  1747         break;
       
  1748     case 0x010: case 0x110: case 0x210: case 0x310:	/* WMUL */
       
  1749         wrd = (insn >> 12) & 0xf;
       
  1750         rd0 = (insn >> 16) & 0xf;
       
  1751         rd1 = (insn >> 0) & 0xf;
       
  1752         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1753         if (insn & (1 << 21)) {
       
  1754             if (insn & (1 << 20))
       
  1755                 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
       
  1756             else
       
  1757                 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
       
  1758         } else {
       
  1759             if (insn & (1 << 20))
       
  1760                 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
       
  1761             else
       
  1762                 gen_op_iwmmxt_mululw_M0_wRn(rd1);
       
  1763         }
       
  1764         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1765         gen_op_iwmmxt_set_mup();
       
  1766         break;
       
  1767     case 0x410: case 0x510: case 0x610: case 0x710:	/* WMAC */
       
  1768         wrd = (insn >> 12) & 0xf;
       
  1769         rd0 = (insn >> 16) & 0xf;
       
  1770         rd1 = (insn >> 0) & 0xf;
       
  1771         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1772         if (insn & (1 << 21))
       
  1773             gen_op_iwmmxt_macsw_M0_wRn(rd1);
       
  1774         else
       
  1775             gen_op_iwmmxt_macuw_M0_wRn(rd1);
       
  1776         if (!(insn & (1 << 20))) {
       
  1777             iwmmxt_load_reg(cpu_V1, wrd);
       
  1778             tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
       
  1779         }
       
  1780         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1781         gen_op_iwmmxt_set_mup();
       
  1782         break;
       
  1783     case 0x006: case 0x406: case 0x806: case 0xc06:	/* WCMPEQ */
       
  1784         wrd = (insn >> 12) & 0xf;
       
  1785         rd0 = (insn >> 16) & 0xf;
       
  1786         rd1 = (insn >> 0) & 0xf;
       
  1787         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1788         switch ((insn >> 22) & 3) {
       
  1789         case 0:
       
  1790             gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
       
  1791             break;
       
  1792         case 1:
       
  1793             gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
       
  1794             break;
       
  1795         case 2:
       
  1796             gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
       
  1797             break;
       
  1798         case 3:
       
  1799             return 1;
       
  1800         }
       
  1801         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1802         gen_op_iwmmxt_set_mup();
       
  1803         gen_op_iwmmxt_set_cup();
       
  1804         break;
       
  1805     case 0x800: case 0x900: case 0xc00: case 0xd00:	/* WAVG2 */
       
  1806         wrd = (insn >> 12) & 0xf;
       
  1807         rd0 = (insn >> 16) & 0xf;
       
  1808         rd1 = (insn >> 0) & 0xf;
       
  1809         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1810         if (insn & (1 << 22)) {
       
  1811             if (insn & (1 << 20))
       
  1812                 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
       
  1813             else
       
  1814                 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
       
  1815         } else {
       
  1816             if (insn & (1 << 20))
       
  1817                 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
       
  1818             else
       
  1819                 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
       
  1820         }
       
  1821         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1822         gen_op_iwmmxt_set_mup();
       
  1823         gen_op_iwmmxt_set_cup();
       
  1824         break;
       
  1825     case 0x802: case 0x902: case 0xa02: case 0xb02:	/* WALIGNR */
       
  1826         wrd = (insn >> 12) & 0xf;
       
  1827         rd0 = (insn >> 16) & 0xf;
       
  1828         rd1 = (insn >> 0) & 0xf;
       
  1829         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1830         gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
       
  1831         gen_op_movl_T1_im(7);
       
  1832         gen_op_andl_T0_T1();
       
  1833         gen_op_iwmmxt_align_M0_T0_wRn(rd1);
       
  1834         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1835         gen_op_iwmmxt_set_mup();
       
  1836         break;
       
  1837     case 0x601: case 0x605: case 0x609: case 0x60d:	/* TINSR */
       
  1838         rd = (insn >> 12) & 0xf;
       
  1839         wrd = (insn >> 16) & 0xf;
       
  1840         gen_movl_T0_reg(s, rd);
       
  1841         gen_op_iwmmxt_movq_M0_wRn(wrd);
       
  1842         switch ((insn >> 6) & 3) {
       
  1843         case 0:
       
  1844             gen_op_movl_T1_im(0xff);
       
  1845             gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
       
  1846             break;
       
  1847         case 1:
       
  1848             gen_op_movl_T1_im(0xffff);
       
  1849             gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
       
  1850             break;
       
  1851         case 2:
       
  1852             gen_op_movl_T1_im(0xffffffff);
       
  1853             gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
       
  1854             break;
       
  1855         case 3:
       
  1856             return 1;
       
  1857         }
       
  1858         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1859         gen_op_iwmmxt_set_mup();
       
  1860         break;
       
  1861     case 0x107: case 0x507: case 0x907: case 0xd07:	/* TEXTRM */
       
  1862         rd = (insn >> 12) & 0xf;
       
  1863         wrd = (insn >> 16) & 0xf;
       
  1864         if (rd == 15)
       
  1865             return 1;
       
  1866         gen_op_iwmmxt_movq_M0_wRn(wrd);
       
  1867         switch ((insn >> 22) & 3) {
       
  1868         case 0:
       
   1869             if (insn & 8) {

   1870                 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);

   1871             } else {

   1872                 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);

   1873             }
       
  1874             break;
       
  1875         case 1:
       
   1876             if (insn & 8) {

   1877                 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);

   1878             } else {

   1879                 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);

   1880             }
       
  1881             break;
       
  1882         case 2:
       
  1883             gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
       
  1884             break;
       
  1885         case 3:
       
  1886             return 1;
       
  1887         }
       
  1888         gen_movl_reg_T0(s, rd);
       
  1889         break;
       
  1890     case 0x117: case 0x517: case 0x917: case 0xd17:	/* TEXTRC */
       
  1891         if ((insn & 0x000ff008) != 0x0003f000)
       
  1892             return 1;
       
  1893         gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
       
  1894         switch ((insn >> 22) & 3) {
       
  1895         case 0:
       
  1896             gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
       
  1897             break;
       
  1898         case 1:
       
  1899             gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
       
  1900             break;
       
  1901         case 2:
       
  1902             gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
       
  1903             break;
       
  1904         case 3:
       
  1905             return 1;
       
  1906         }
       
  1907         gen_op_shll_T1_im(28);
       
  1908         gen_set_nzcv(cpu_T[1]);
       
  1909         break;
       
  1910     case 0x401: case 0x405: case 0x409: case 0x40d:	/* TBCST */
       
  1911         rd = (insn >> 12) & 0xf;
       
  1912         wrd = (insn >> 16) & 0xf;
       
  1913         gen_movl_T0_reg(s, rd);
       
  1914         switch ((insn >> 6) & 3) {
       
  1915         case 0:
       
  1916             gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
       
  1917             break;
       
  1918         case 1:
       
  1919             gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
       
  1920             break;
       
  1921         case 2:
       
  1922             gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
       
  1923             break;
       
  1924         case 3:
       
  1925             return 1;
       
  1926         }
       
  1927         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1928         gen_op_iwmmxt_set_mup();
       
  1929         break;
       
  1930     case 0x113: case 0x513: case 0x913: case 0xd13:	/* TANDC */
       
  1931         if ((insn & 0x000ff00f) != 0x0003f000)
       
  1932             return 1;
       
  1933         gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
       
  1934         switch ((insn >> 22) & 3) {
       
  1935         case 0:
       
  1936             for (i = 0; i < 7; i ++) {
       
  1937                 gen_op_shll_T1_im(4);
       
  1938                 gen_op_andl_T0_T1();
       
  1939             }
       
  1940             break;
       
  1941         case 1:
       
  1942             for (i = 0; i < 3; i ++) {
       
  1943                 gen_op_shll_T1_im(8);
       
  1944                 gen_op_andl_T0_T1();
       
  1945             }
       
  1946             break;
       
  1947         case 2:
       
  1948             gen_op_shll_T1_im(16);
       
  1949             gen_op_andl_T0_T1();
       
  1950             break;
       
  1951         case 3:
       
  1952             return 1;
       
  1953         }
       
  1954         gen_set_nzcv(cpu_T[0]);
       
  1955         break;
       
  1956     case 0x01c: case 0x41c: case 0x81c: case 0xc1c:	/* WACC */
       
  1957         wrd = (insn >> 12) & 0xf;
       
  1958         rd0 = (insn >> 16) & 0xf;
       
  1959         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  1960         switch ((insn >> 22) & 3) {
       
  1961         case 0:
       
  1962             gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
       
  1963             break;
       
  1964         case 1:
       
  1965             gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
       
  1966             break;
       
  1967         case 2:
       
  1968             gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
       
  1969             break;
       
  1970         case 3:
       
  1971             return 1;
       
  1972         }
       
  1973         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  1974         gen_op_iwmmxt_set_mup();
       
  1975         break;
       
  1976     case 0x115: case 0x515: case 0x915: case 0xd15:	/* TORC */
       
  1977         if ((insn & 0x000ff00f) != 0x0003f000)
       
  1978             return 1;
       
  1979         gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
       
  1980         switch ((insn >> 22) & 3) {
       
  1981         case 0:
       
  1982             for (i = 0; i < 7; i ++) {
       
  1983                 gen_op_shll_T1_im(4);
       
  1984                 gen_op_orl_T0_T1();
       
  1985             }
       
  1986             break;
       
  1987         case 1:
       
  1988             for (i = 0; i < 3; i ++) {
       
  1989                 gen_op_shll_T1_im(8);
       
  1990                 gen_op_orl_T0_T1();
       
  1991             }
       
  1992             break;
       
  1993         case 2:
       
  1994             gen_op_shll_T1_im(16);
       
  1995             gen_op_orl_T0_T1();
       
  1996             break;
       
  1997         case 3:
       
  1998             return 1;
       
  1999         }
       
  2000         gen_set_nzcv(cpu_T[0]);
       
  2001         break;
       
  2002     case 0x103: case 0x503: case 0x903: case 0xd03:	/* TMOVMSK */
       
  2003         rd = (insn >> 12) & 0xf;
       
  2004         rd0 = (insn >> 16) & 0xf;
       
  2005         if ((insn & 0xf) != 0)
       
  2006             return 1;
       
  2007         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2008         switch ((insn >> 22) & 3) {
       
  2009         case 0:
       
  2010             gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
       
  2011             break;
       
  2012         case 1:
       
  2013             gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
       
  2014             break;
       
  2015         case 2:
       
  2016             gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
       
  2017             break;
       
  2018         case 3:
       
  2019             return 1;
       
  2020         }
       
  2021         gen_movl_reg_T0(s, rd);
       
  2022         break;
       
  2023     case 0x106: case 0x306: case 0x506: case 0x706:	/* WCMPGT */
       
  2024     case 0x906: case 0xb06: case 0xd06: case 0xf06:
       
  2025         wrd = (insn >> 12) & 0xf;
       
  2026         rd0 = (insn >> 16) & 0xf;
       
  2027         rd1 = (insn >> 0) & 0xf;
       
  2028         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2029         switch ((insn >> 22) & 3) {
       
  2030         case 0:
       
  2031             if (insn & (1 << 21))
       
  2032                 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
       
  2033             else
       
  2034                 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
       
  2035             break;
       
  2036         case 1:
       
  2037             if (insn & (1 << 21))
       
  2038                 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
       
  2039             else
       
  2040                 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
       
  2041             break;
       
  2042         case 2:
       
  2043             if (insn & (1 << 21))
       
  2044                 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
       
  2045             else
       
  2046                 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
       
  2047             break;
       
  2048         case 3:
       
  2049             return 1;
       
  2050         }
       
  2051         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2052         gen_op_iwmmxt_set_mup();
       
  2053         gen_op_iwmmxt_set_cup();
       
  2054         break;
       
  2055     case 0x00e: case 0x20e: case 0x40e: case 0x60e:	/* WUNPCKEL */
       
  2056     case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
       
  2057         wrd = (insn >> 12) & 0xf;
       
  2058         rd0 = (insn >> 16) & 0xf;
       
  2059         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2060         switch ((insn >> 22) & 3) {
       
  2061         case 0:
       
  2062             if (insn & (1 << 21))
       
  2063                 gen_op_iwmmxt_unpacklsb_M0();
       
  2064             else
       
  2065                 gen_op_iwmmxt_unpacklub_M0();
       
  2066             break;
       
  2067         case 1:
       
  2068             if (insn & (1 << 21))
       
  2069                 gen_op_iwmmxt_unpacklsw_M0();
       
  2070             else
       
  2071                 gen_op_iwmmxt_unpackluw_M0();
       
  2072             break;
       
  2073         case 2:
       
  2074             if (insn & (1 << 21))
       
  2075                 gen_op_iwmmxt_unpacklsl_M0();
       
  2076             else
       
  2077                 gen_op_iwmmxt_unpacklul_M0();
       
  2078             break;
       
  2079         case 3:
       
  2080             return 1;
       
  2081         }
       
  2082         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2083         gen_op_iwmmxt_set_mup();
       
  2084         gen_op_iwmmxt_set_cup();
       
  2085         break;
       
  2086     case 0x00c: case 0x20c: case 0x40c: case 0x60c:	/* WUNPCKEH */
       
  2087     case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
       
  2088         wrd = (insn >> 12) & 0xf;
       
  2089         rd0 = (insn >> 16) & 0xf;
       
  2090         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2091         switch ((insn >> 22) & 3) {
       
  2092         case 0:
       
  2093             if (insn & (1 << 21))
       
  2094                 gen_op_iwmmxt_unpackhsb_M0();
       
  2095             else
       
  2096                 gen_op_iwmmxt_unpackhub_M0();
       
  2097             break;
       
  2098         case 1:
       
  2099             if (insn & (1 << 21))
       
  2100                 gen_op_iwmmxt_unpackhsw_M0();
       
  2101             else
       
  2102                 gen_op_iwmmxt_unpackhuw_M0();
       
  2103             break;
       
  2104         case 2:
       
  2105             if (insn & (1 << 21))
       
  2106                 gen_op_iwmmxt_unpackhsl_M0();
       
  2107             else
       
  2108                 gen_op_iwmmxt_unpackhul_M0();
       
  2109             break;
       
  2110         case 3:
       
  2111             return 1;
       
  2112         }
       
  2113         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2114         gen_op_iwmmxt_set_mup();
       
  2115         gen_op_iwmmxt_set_cup();
       
  2116         break;
       
  2117     case 0x204: case 0x604: case 0xa04: case 0xe04:	/* WSRL */
       
  2118     case 0x214: case 0x614: case 0xa14: case 0xe14:
       
  2119         wrd = (insn >> 12) & 0xf;
       
  2120         rd0 = (insn >> 16) & 0xf;
       
  2121         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2122         if (gen_iwmmxt_shift(insn, 0xff))
       
  2123             return 1;
       
  2124         switch ((insn >> 22) & 3) {
       
  2125         case 0:
       
  2126             return 1;
       
  2127         case 1:
       
  2128             gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2129             break;
       
  2130         case 2:
       
  2131             gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2132             break;
       
  2133         case 3:
       
  2134             gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2135             break;
       
  2136         }
       
  2137         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2138         gen_op_iwmmxt_set_mup();
       
  2139         gen_op_iwmmxt_set_cup();
       
  2140         break;
       
  2141     case 0x004: case 0x404: case 0x804: case 0xc04:	/* WSRA */
       
  2142     case 0x014: case 0x414: case 0x814: case 0xc14:
       
  2143         wrd = (insn >> 12) & 0xf;
       
  2144         rd0 = (insn >> 16) & 0xf;
       
  2145         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2146         if (gen_iwmmxt_shift(insn, 0xff))
       
  2147             return 1;
       
  2148         switch ((insn >> 22) & 3) {
       
  2149         case 0:
       
  2150             return 1;
       
  2151         case 1:
       
  2152             gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2153             break;
       
  2154         case 2:
       
  2155             gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2156             break;
       
  2157         case 3:
       
  2158             gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2159             break;
       
  2160         }
       
  2161         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2162         gen_op_iwmmxt_set_mup();
       
  2163         gen_op_iwmmxt_set_cup();
       
  2164         break;
       
  2165     case 0x104: case 0x504: case 0x904: case 0xd04:	/* WSLL */
       
  2166     case 0x114: case 0x514: case 0x914: case 0xd14:
       
  2167         wrd = (insn >> 12) & 0xf;
       
  2168         rd0 = (insn >> 16) & 0xf;
       
  2169         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2170         if (gen_iwmmxt_shift(insn, 0xff))
       
  2171             return 1;
       
  2172         switch ((insn >> 22) & 3) {
       
  2173         case 0:
       
  2174             return 1;
       
  2175         case 1:
       
  2176             gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2177             break;
       
  2178         case 2:
       
  2179             gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2180             break;
       
  2181         case 3:
       
  2182             gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2183             break;
       
  2184         }
       
  2185         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2186         gen_op_iwmmxt_set_mup();
       
  2187         gen_op_iwmmxt_set_cup();
       
  2188         break;
       
  2189     case 0x304: case 0x704: case 0xb04: case 0xf04:	/* WROR */
       
  2190     case 0x314: case 0x714: case 0xb14: case 0xf14:
       
  2191         wrd = (insn >> 12) & 0xf;
       
  2192         rd0 = (insn >> 16) & 0xf;
       
  2193         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2194         switch ((insn >> 22) & 3) {
       
  2195         case 0:
       
  2196             return 1;
       
  2197         case 1:
       
  2198             if (gen_iwmmxt_shift(insn, 0xf))
       
  2199                 return 1;
       
  2200             gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2201             break;
       
  2202         case 2:
       
  2203             if (gen_iwmmxt_shift(insn, 0x1f))
       
  2204                 return 1;
       
  2205             gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2206             break;
       
  2207         case 3:
       
  2208             if (gen_iwmmxt_shift(insn, 0x3f))
       
  2209                 return 1;
       
  2210             gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2211             break;
       
  2212         }
       
  2213         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2214         gen_op_iwmmxt_set_mup();
       
  2215         gen_op_iwmmxt_set_cup();
       
  2216         break;
       
  2217     case 0x116: case 0x316: case 0x516: case 0x716:	/* WMIN */
       
  2218     case 0x916: case 0xb16: case 0xd16: case 0xf16:
       
  2219         wrd = (insn >> 12) & 0xf;
       
  2220         rd0 = (insn >> 16) & 0xf;
       
  2221         rd1 = (insn >> 0) & 0xf;
       
  2222         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2223         switch ((insn >> 22) & 3) {
       
  2224         case 0:
       
  2225             if (insn & (1 << 21))
       
  2226                 gen_op_iwmmxt_minsb_M0_wRn(rd1);
       
  2227             else
       
  2228                 gen_op_iwmmxt_minub_M0_wRn(rd1);
       
  2229             break;
       
  2230         case 1:
       
  2231             if (insn & (1 << 21))
       
  2232                 gen_op_iwmmxt_minsw_M0_wRn(rd1);
       
  2233             else
       
  2234                 gen_op_iwmmxt_minuw_M0_wRn(rd1);
       
  2235             break;
       
  2236         case 2:
       
  2237             if (insn & (1 << 21))
       
  2238                 gen_op_iwmmxt_minsl_M0_wRn(rd1);
       
  2239             else
       
  2240                 gen_op_iwmmxt_minul_M0_wRn(rd1);
       
  2241             break;
       
  2242         case 3:
       
  2243             return 1;
       
  2244         }
       
  2245         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2246         gen_op_iwmmxt_set_mup();
       
  2247         break;
       
  2248     case 0x016: case 0x216: case 0x416: case 0x616:	/* WMAX */
       
  2249     case 0x816: case 0xa16: case 0xc16: case 0xe16:
       
  2250         wrd = (insn >> 12) & 0xf;
       
  2251         rd0 = (insn >> 16) & 0xf;
       
  2252         rd1 = (insn >> 0) & 0xf;
       
  2253         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2254         switch ((insn >> 22) & 3) {
       
  2255         case 0:
       
  2256             if (insn & (1 << 21))
       
  2257                 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
       
  2258             else
       
  2259                 gen_op_iwmmxt_maxub_M0_wRn(rd1);
       
  2260             break;
       
  2261         case 1:
       
  2262             if (insn & (1 << 21))
       
  2263                 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
       
  2264             else
       
  2265                 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
       
  2266             break;
       
  2267         case 2:
       
  2268             if (insn & (1 << 21))
       
  2269                 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
       
  2270             else
       
  2271                 gen_op_iwmmxt_maxul_M0_wRn(rd1);
       
  2272             break;
       
  2273         case 3:
       
  2274             return 1;
       
  2275         }
       
  2276         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2277         gen_op_iwmmxt_set_mup();
       
  2278         break;
       
  2279     case 0x002: case 0x102: case 0x202: case 0x302:	/* WALIGNI */
       
  2280     case 0x402: case 0x502: case 0x602: case 0x702:
       
  2281         wrd = (insn >> 12) & 0xf;
       
  2282         rd0 = (insn >> 16) & 0xf;
       
  2283         rd1 = (insn >> 0) & 0xf;
       
  2284         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2285         gen_op_movl_T0_im((insn >> 20) & 3);
       
  2286         gen_op_iwmmxt_align_M0_T0_wRn(rd1);
       
  2287         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2288         gen_op_iwmmxt_set_mup();
       
  2289         break;
       
  2290     case 0x01a: case 0x11a: case 0x21a: case 0x31a:	/* WSUB */
       
  2291     case 0x41a: case 0x51a: case 0x61a: case 0x71a:
       
  2292     case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
       
  2293     case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
       
  2294         wrd = (insn >> 12) & 0xf;
       
  2295         rd0 = (insn >> 16) & 0xf;
       
  2296         rd1 = (insn >> 0) & 0xf;
       
  2297         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2298         switch ((insn >> 20) & 0xf) {
       
  2299         case 0x0:
       
  2300             gen_op_iwmmxt_subnb_M0_wRn(rd1);
       
  2301             break;
       
  2302         case 0x1:
       
  2303             gen_op_iwmmxt_subub_M0_wRn(rd1);
       
  2304             break;
       
  2305         case 0x3:
       
  2306             gen_op_iwmmxt_subsb_M0_wRn(rd1);
       
  2307             break;
       
  2308         case 0x4:
       
  2309             gen_op_iwmmxt_subnw_M0_wRn(rd1);
       
  2310             break;
       
  2311         case 0x5:
       
  2312             gen_op_iwmmxt_subuw_M0_wRn(rd1);
       
  2313             break;
       
  2314         case 0x7:
       
  2315             gen_op_iwmmxt_subsw_M0_wRn(rd1);
       
  2316             break;
       
  2317         case 0x8:
       
  2318             gen_op_iwmmxt_subnl_M0_wRn(rd1);
       
  2319             break;
       
  2320         case 0x9:
       
  2321             gen_op_iwmmxt_subul_M0_wRn(rd1);
       
  2322             break;
       
  2323         case 0xb:
       
  2324             gen_op_iwmmxt_subsl_M0_wRn(rd1);
       
  2325             break;
       
  2326         default:
       
  2327             return 1;
       
  2328         }
       
  2329         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2330         gen_op_iwmmxt_set_mup();
       
  2331         gen_op_iwmmxt_set_cup();
       
  2332         break;
       
  2333     case 0x01e: case 0x11e: case 0x21e: case 0x31e:	/* WSHUFH */
       
  2334     case 0x41e: case 0x51e: case 0x61e: case 0x71e:
       
  2335     case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
       
  2336     case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
       
  2337         wrd = (insn >> 12) & 0xf;
       
  2338         rd0 = (insn >> 16) & 0xf;
       
  2339         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2340         gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
       
  2341         gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
       
  2342         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2343         gen_op_iwmmxt_set_mup();
       
  2344         gen_op_iwmmxt_set_cup();
       
  2345         break;
       
  2346     case 0x018: case 0x118: case 0x218: case 0x318:	/* WADD */
       
  2347     case 0x418: case 0x518: case 0x618: case 0x718:
       
  2348     case 0x818: case 0x918: case 0xa18: case 0xb18:
       
  2349     case 0xc18: case 0xd18: case 0xe18: case 0xf18:
       
  2350         wrd = (insn >> 12) & 0xf;
       
  2351         rd0 = (insn >> 16) & 0xf;
       
  2352         rd1 = (insn >> 0) & 0xf;
       
  2353         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2354         switch ((insn >> 20) & 0xf) {
       
  2355         case 0x0:
       
  2356             gen_op_iwmmxt_addnb_M0_wRn(rd1);
       
  2357             break;
       
  2358         case 0x1:
       
  2359             gen_op_iwmmxt_addub_M0_wRn(rd1);
       
  2360             break;
       
  2361         case 0x3:
       
  2362             gen_op_iwmmxt_addsb_M0_wRn(rd1);
       
  2363             break;
       
  2364         case 0x4:
       
  2365             gen_op_iwmmxt_addnw_M0_wRn(rd1);
       
  2366             break;
       
  2367         case 0x5:
       
  2368             gen_op_iwmmxt_adduw_M0_wRn(rd1);
       
  2369             break;
       
  2370         case 0x7:
       
  2371             gen_op_iwmmxt_addsw_M0_wRn(rd1);
       
  2372             break;
       
  2373         case 0x8:
       
  2374             gen_op_iwmmxt_addnl_M0_wRn(rd1);
       
  2375             break;
       
  2376         case 0x9:
       
  2377             gen_op_iwmmxt_addul_M0_wRn(rd1);
       
  2378             break;
       
  2379         case 0xb:
       
  2380             gen_op_iwmmxt_addsl_M0_wRn(rd1);
       
  2381             break;
       
  2382         default:
       
  2383             return 1;
       
  2384         }
       
  2385         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2386         gen_op_iwmmxt_set_mup();
       
  2387         gen_op_iwmmxt_set_cup();
       
  2388         break;
       
  2389     case 0x008: case 0x108: case 0x208: case 0x308:	/* WPACK */
       
  2390     case 0x408: case 0x508: case 0x608: case 0x708:
       
  2391     case 0x808: case 0x908: case 0xa08: case 0xb08:
       
  2392     case 0xc08: case 0xd08: case 0xe08: case 0xf08:
       
  2393         wrd = (insn >> 12) & 0xf;
       
  2394         rd0 = (insn >> 16) & 0xf;
       
  2395         rd1 = (insn >> 0) & 0xf;
       
  2396         gen_op_iwmmxt_movq_M0_wRn(rd0);
       
  2397         if (!(insn & (1 << 20)))
       
  2398             return 1;
       
  2399         switch ((insn >> 22) & 3) {
       
  2400         case 0:
       
  2401             return 1;
       
  2402         case 1:
       
  2403             if (insn & (1 << 21))
       
  2404                 gen_op_iwmmxt_packsw_M0_wRn(rd1);
       
  2405             else
       
  2406                 gen_op_iwmmxt_packuw_M0_wRn(rd1);
       
  2407             break;
       
  2408         case 2:
       
  2409             if (insn & (1 << 21))
       
  2410                 gen_op_iwmmxt_packsl_M0_wRn(rd1);
       
  2411             else
       
  2412                 gen_op_iwmmxt_packul_M0_wRn(rd1);
       
  2413             break;
       
  2414         case 3:
       
  2415             if (insn & (1 << 21))
       
  2416                 gen_op_iwmmxt_packsq_M0_wRn(rd1);
       
  2417             else
       
  2418                 gen_op_iwmmxt_packuq_M0_wRn(rd1);
       
  2419             break;
       
  2420         }
       
  2421         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2422         gen_op_iwmmxt_set_mup();
       
  2423         gen_op_iwmmxt_set_cup();
       
  2424         break;
       
  2425     case 0x201: case 0x203: case 0x205: case 0x207:
       
  2426     case 0x209: case 0x20b: case 0x20d: case 0x20f:
       
  2427     case 0x211: case 0x213: case 0x215: case 0x217:
       
  2428     case 0x219: case 0x21b: case 0x21d: case 0x21f:
       
  2429         wrd = (insn >> 5) & 0xf;
       
  2430         rd0 = (insn >> 12) & 0xf;
       
  2431         rd1 = (insn >> 0) & 0xf;
       
  2432         if (rd0 == 0xf || rd1 == 0xf)
       
  2433             return 1;
       
  2434         gen_op_iwmmxt_movq_M0_wRn(wrd);
       
  2435         switch ((insn >> 16) & 0xf) {
       
  2436         case 0x0:					/* TMIA */
       
  2437             gen_movl_T0_reg(s, rd0);
       
  2438             gen_movl_T1_reg(s, rd1);
       
  2439             gen_op_iwmmxt_muladdsl_M0_T0_T1();
       
  2440             break;
       
  2441         case 0x8:					/* TMIAPH */
       
  2442             gen_movl_T0_reg(s, rd0);
       
  2443             gen_movl_T1_reg(s, rd1);
       
  2444             gen_op_iwmmxt_muladdsw_M0_T0_T1();
       
  2445             break;
       
  2446         case 0xc: case 0xd: case 0xe: case 0xf:		/* TMIAxy */
       
  2447             gen_movl_T1_reg(s, rd0);
       
  2448             if (insn & (1 << 16))
       
  2449                 gen_op_shrl_T1_im(16);
       
  2450             gen_op_movl_T0_T1();
       
  2451             gen_movl_T1_reg(s, rd1);
       
  2452             if (insn & (1 << 17))
       
  2453                 gen_op_shrl_T1_im(16);
       
  2454             gen_op_iwmmxt_muladdswl_M0_T0_T1();
       
  2455             break;
       
  2456         default:
       
  2457             return 1;
       
  2458         }
       
  2459         gen_op_iwmmxt_movq_wRn_M0(wrd);
       
  2460         gen_op_iwmmxt_set_mup();
       
  2461         break;
       
  2462     default:
       
  2463         return 1;
       
  2464     }
       
  2465 
       
  2466     return 0;
       
  2467 }
       
  2468 
       
   2469 /* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred

   2470    (i.e. an undefined instruction).  */
       
  2471 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
       
  2472 {
       
  2473     int acc, rd0, rd1, rdhi, rdlo;
       
  2474 
       
  2475     if ((insn & 0x0ff00f10) == 0x0e200010) {
       
  2476         /* Multiply with Internal Accumulate Format */
       
  2477         rd0 = (insn >> 12) & 0xf;
       
  2478         rd1 = insn & 0xf;
       
  2479         acc = (insn >> 5) & 7;
       
  2480 
       
  2481         if (acc != 0)
       
  2482             return 1;
       
  2483 
       
  2484         switch ((insn >> 16) & 0xf) {
       
  2485         case 0x0:					/* MIA */
       
  2486             gen_movl_T0_reg(s, rd0);
       
  2487             gen_movl_T1_reg(s, rd1);
       
  2488             gen_op_iwmmxt_muladdsl_M0_T0_T1();
       
  2489             break;
       
  2490         case 0x8:					/* MIAPH */
       
  2491             gen_movl_T0_reg(s, rd0);
       
  2492             gen_movl_T1_reg(s, rd1);
       
  2493             gen_op_iwmmxt_muladdsw_M0_T0_T1();
       
  2494             break;
       
  2495         case 0xc:					/* MIABB */
       
  2496         case 0xd:					/* MIABT */
       
  2497         case 0xe:					/* MIATB */
       
  2498         case 0xf:					/* MIATT */
       
  2499             gen_movl_T1_reg(s, rd0);
       
  2500             if (insn & (1 << 16))
       
  2501                 gen_op_shrl_T1_im(16);
       
  2502             gen_op_movl_T0_T1();
       
  2503             gen_movl_T1_reg(s, rd1);
       
  2504             if (insn & (1 << 17))
       
  2505                 gen_op_shrl_T1_im(16);
       
  2506             gen_op_iwmmxt_muladdswl_M0_T0_T1();
       
  2507             break;
       
  2508         default:
       
  2509             return 1;
       
  2510         }
       
  2511 
       
  2512         gen_op_iwmmxt_movq_wRn_M0(acc);
       
  2513         return 0;
       
  2514     }
       
  2515 
       
  2516     if ((insn & 0x0fe00ff8) == 0x0c400000) {
       
  2517         /* Internal Accumulator Access Format */
       
  2518         rdhi = (insn >> 16) & 0xf;
       
  2519         rdlo = (insn >> 12) & 0xf;
       
  2520         acc = insn & 7;
       
  2521 
       
  2522         if (acc != 0)
       
  2523             return 1;
       
  2524 
       
  2525         if (insn & ARM_CP_RW_BIT) {			/* MRA */
       
  2526             gen_iwmmxt_movl_T0_T1_wRn(acc);
       
  2527             gen_movl_reg_T0(s, rdlo);
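                    /* The accumulator is 40 bits wide: only bits [39:32] are
                       transferred to rdhi. */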
       
  2528             gen_op_movl_T0_im((1 << (40 - 32)) - 1);
       
  2529             gen_op_andl_T0_T1();
       
  2530             gen_movl_reg_T0(s, rdhi);
       
  2531         } else {					/* MAR */
       
  2532             gen_movl_T0_reg(s, rdlo);
       
  2533             gen_movl_T1_reg(s, rdhi);
       
  2534             gen_iwmmxt_movl_wRn_T0_T1(acc);
       
  2535         }
       
  2536         return 0;
       
  2537     }
       
  2538 
       
  2539     return 1;
       
  2540 }
       
  2541 
       
   2542 /* Disassemble a generic system coprocessor instruction.  Return nonzero if

   2543    the instruction is not defined.  */
       
  2544 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
       
  2545 {
       
  2546     TCGv tmp;
       
  2547     uint32_t rd = (insn >> 12) & 0xf;
       
  2548     uint32_t cp = (insn >> 8) & 0xf;
       
  2549     if (IS_USER(s)) {
       
  2550         return 1;
       
  2551     }
       
  2552 
       
  2553     if (insn & ARM_CP_RW_BIT) {
       
  2554         if (!env->cp[cp].cp_read)
       
  2555             return 1;
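                /* Keep the PC up to date before calling the helper, presumably
                   so that any exception raised from the coprocessor hook is
                   reported at the right address. */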
       
  2556         gen_set_pc_im(s->pc);
       
  2557         tmp = new_tmp();
       
  2558         gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
       
  2559         store_reg(s, rd, tmp);
       
  2560     } else {
       
  2561         if (!env->cp[cp].cp_write)
       
  2562             return 1;
       
  2563         gen_set_pc_im(s->pc);
       
  2564         tmp = load_reg(s, rd);
       
  2565         gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
       
  2566         dead_tmp(tmp);
       
  2567     }
       
  2568     return 0;
       
  2569 }
       
  2570 
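        /* Return nonzero for the few cp15 accesses that are permitted from
           user mode: the TLS registers (c13) and the ISB/DSB/DMB barrier
           operations (c7). */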
       
  2571 static int cp15_user_ok(uint32_t insn)
       
  2572 {
       
  2573     int cpn = (insn >> 16) & 0xf;
       
  2574     int cpm = insn & 0xf;
       
  2575     int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
       
  2576 
       
  2577     if (cpn == 13 && cpm == 0) {
       
  2578         /* TLS register.  */
       
  2579         if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
       
  2580             return 1;
       
  2581     }
       
  2582     if (cpn == 7) {
       
  2583         /* ISB, DSB, DMB.  */
       
  2584         if ((cpm == 5 && op == 4)
       
  2585                 || (cpm == 10 && (op == 4 || op == 5)))
       
  2586             return 1;
       
  2587     }
       
  2588     return 0;
       
  2589 }
       
  2590 
       
   2591 /* Disassemble a system control coprocessor (cp15) instruction.  Return nonzero if

   2592    the instruction is not defined.  */
       
  2593 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
       
  2594 {
       
  2595     uint32_t rd;
       
  2596     TCGv tmp;
       
  2597 
       
  2598     /* M profile cores use memory mapped registers instead of cp15.  */
       
  2599     if (arm_feature(env, ARM_FEATURE_M))
       
   2600         return 1;
       
  2601 
       
  2602     if ((insn & (1 << 25)) == 0) {
       
  2603         if (insn & (1 << 20)) {
       
  2604             /* mrrc */
       
  2605             return 1;
       
  2606         }
       
  2607         /* mcrr.  Used for block cache operations, so implement as no-op.  */
       
  2608         return 0;
       
  2609     }
       
  2610     if ((insn & (1 << 4)) == 0) {
       
  2611         /* cdp */
       
  2612         return 1;
       
  2613     }
       
  2614     if (IS_USER(s) && !cp15_user_ok(insn)) {
       
  2615         return 1;
       
  2616     }
       
  2617     if ((insn & 0x0fff0fff) == 0x0e070f90
       
  2618         || (insn & 0x0fff0fff) == 0x0e070f58) {
       
  2619         /* Wait for interrupt.  */
       
  2620         gen_set_pc_im(s->pc);
       
  2621         s->is_jmp = DISAS_WFI;
       
  2622         return 0;
       
  2623     }
       
  2624     rd = (insn >> 12) & 0xf;
       
  2625     if (insn & ARM_CP_RW_BIT) {
       
  2626         tmp = new_tmp();
       
  2627         gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
       
   2628         /* If the destination register is r15 then this would set the condition codes.  */
       
  2629         if (rd != 15)
       
  2630             store_reg(s, rd, tmp);
       
  2631         else
       
  2632             dead_tmp(tmp);
       
  2633     } else {
       
  2634         tmp = load_reg(s, rd);
       
  2635         gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
       
  2636         dead_tmp(tmp);
       
  2637         /* Normally we would always end the TB here, but Linux
       
  2638          * arch/arm/mach-pxa/sleep.S expects two instructions following
       
  2639          * an MMU enable to execute from cache.  Imitate this behaviour.  */
       
  2640         if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
       
  2641                 (insn & 0x0fff0fff) != 0x0e010f10)
       
  2642             gen_lookup_tb(s);
       
  2643     }
       
  2644     return 0;
       
  2645 }
       
  2646 
       
  2647 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
       
  2648 #define VFP_SREG(insn, bigbit, smallbit) \
       
  2649   ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
       
  2650 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
       
  2651     if (arm_feature(env, ARM_FEATURE_VFP3)) { \
       
  2652         reg = (((insn) >> (bigbit)) & 0x0f) \
       
  2653               | (((insn) >> ((smallbit) - 4)) & 0x10); \
       
  2654     } else { \
       
  2655         if (insn & (1 << (smallbit))) \
       
  2656             return 1; \
       
  2657         reg = ((insn) >> (bigbit)) & 0x0f; \
       
  2658     }} while (0)
       
  2659 
       
  2660 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
       
  2661 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
       
  2662 #define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
       
  2663 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
       
  2664 #define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
       
  2665 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
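        /* Worked example of the encodings above (the "D" field): a single-precision
           register number is insn[15:12]:insn[22] with bit 22 as the low bit, while
           on VFP3 a double-precision number is insn[22]:insn[15:12] with bit 22 as
           the high bit; pre-VFP3 cores must have that extra bit clear or the
           instruction is undefined.  */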
       
  2666 
       
  2667 /* Move between integer and VFP cores.  */
       
  2668 static TCGv gen_vfp_mrs(void)
       
  2669 {
       
  2670     TCGv tmp = new_tmp();
       
  2671     tcg_gen_mov_i32(tmp, cpu_F0s);
       
  2672     return tmp;
       
  2673 }
       
  2674 
       
  2675 static void gen_vfp_msr(TCGv tmp)
       
  2676 {
       
  2677     tcg_gen_mov_i32(cpu_F0s, tmp);
       
  2678     dead_tmp(tmp);
       
  2679 }
       
  2680 
       
  2681 static inline int
       
  2682 vfp_enabled(CPUState * env)
       
  2683 {
       
  2684     return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
       
  2685 }
       
  2686 
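        /* The three helpers below broadcast a value across a 32-bit word: the
           byte at bit position "shift", the low halfword, or the high halfword
           respectively.  */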
       
  2687 static void gen_neon_dup_u8(TCGv var, int shift)
       
  2688 {
       
  2689     TCGv tmp = new_tmp();
       
  2690     if (shift)
       
  2691         tcg_gen_shri_i32(var, var, shift);
       
  2692     tcg_gen_ext8u_i32(var, var);
       
  2693     tcg_gen_shli_i32(tmp, var, 8);
       
  2694     tcg_gen_or_i32(var, var, tmp);
       
  2695     tcg_gen_shli_i32(tmp, var, 16);
       
  2696     tcg_gen_or_i32(var, var, tmp);
       
  2697     dead_tmp(tmp);
       
  2698 }
       
  2699 
       
  2700 static void gen_neon_dup_low16(TCGv var)
       
  2701 {
       
  2702     TCGv tmp = new_tmp();
       
  2703     tcg_gen_ext16u_i32(var, var);
       
  2704     tcg_gen_shli_i32(tmp, var, 16);
       
  2705     tcg_gen_or_i32(var, var, tmp);
       
  2706     dead_tmp(tmp);
       
  2707 }
       
  2708 
       
  2709 static void gen_neon_dup_high16(TCGv var)
       
  2710 {
       
  2711     TCGv tmp = new_tmp();
       
  2712     tcg_gen_andi_i32(var, var, 0xffff0000);
       
  2713     tcg_gen_shri_i32(tmp, var, 16);
       
  2714     tcg_gen_or_i32(var, var, tmp);
       
  2715     dead_tmp(tmp);
       
  2716 }
       
  2717 
       
   2718 /* Disassemble a VFP instruction.  Returns nonzero if an error occurred
       
   2719    (i.e. an undefined instruction).  */
       
  2720 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
       
  2721 {
       
  2722     uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
       
  2723     int dp, veclen;
       
  2724     TCGv tmp;
       
  2725     TCGv tmp2;
       
  2726 
       
  2727     if (!arm_feature(env, ARM_FEATURE_VFP))
       
  2728         return 1;
       
  2729 
       
  2730     if (!vfp_enabled(env)) {
       
  2731         /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
       
  2732         if ((insn & 0x0fe00fff) != 0x0ee00a10)
       
  2733             return 1;
       
  2734         rn = (insn >> 16) & 0xf;
       
  2735         if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
       
  2736             && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
       
  2737             return 1;
       
  2738     }
       
  2739     dp = ((insn & 0xf00) == 0xb00);
       
  2740     switch ((insn >> 24) & 0xf) {
       
  2741     case 0xe:
       
  2742         if (insn & (1 << 4)) {
       
  2743             /* single register transfer */
       
  2744             rd = (insn >> 12) & 0xf;
       
  2745             if (dp) {
       
  2746                 int size;
       
  2747                 int pass;
       
  2748 
       
  2749                 VFP_DREG_N(rn, insn);
       
  2750                 if (insn & 0xf)
       
  2751                     return 1;
       
  2752                 if (insn & 0x00c00060
       
  2753                     && !arm_feature(env, ARM_FEATURE_NEON))
       
  2754                     return 1;
       
  2755 
       
  2756                 pass = (insn >> 21) & 1;
       
  2757                 if (insn & (1 << 22)) {
       
  2758                     size = 0;
       
  2759                     offset = ((insn >> 5) & 3) * 8;
       
  2760                 } else if (insn & (1 << 5)) {
       
  2761                     size = 1;
       
  2762                     offset = (insn & (1 << 6)) ? 16 : 0;
       
  2763                 } else {
       
  2764                     size = 2;
       
  2765                     offset = 0;
       
  2766                 }
       
  2767                 if (insn & ARM_CP_RW_BIT) {
       
  2768                     /* vfp->arm */
       
  2769                     tmp = neon_load_reg(rn, pass);
       
  2770                     switch (size) {
       
  2771                     case 0:
       
  2772                         if (offset)
       
  2773                             tcg_gen_shri_i32(tmp, tmp, offset);
       
  2774                         if (insn & (1 << 23))
       
  2775                             gen_uxtb(tmp);
       
  2776                         else
       
  2777                             gen_sxtb(tmp);
       
  2778                         break;
       
  2779                     case 1:
       
  2780                         if (insn & (1 << 23)) {
       
  2781                             if (offset) {
       
  2782                                 tcg_gen_shri_i32(tmp, tmp, 16);
       
  2783                             } else {
       
  2784                                 gen_uxth(tmp);
       
  2785                             }
       
  2786                         } else {
       
  2787                             if (offset) {
       
  2788                                 tcg_gen_sari_i32(tmp, tmp, 16);
       
  2789                             } else {
       
  2790                                 gen_sxth(tmp);
       
  2791                             }
       
  2792                         }
       
  2793                         break;
       
  2794                     case 2:
       
  2795                         break;
       
  2796                     }
       
  2797                     store_reg(s, rd, tmp);
       
  2798                 } else {
       
  2799                     /* arm->vfp */
       
  2800                     tmp = load_reg(s, rd);
       
  2801                     if (insn & (1 << 23)) {
       
  2802                         /* VDUP */
       
  2803                         if (size == 0) {
       
  2804                             gen_neon_dup_u8(tmp, 0);
       
  2805                         } else if (size == 1) {
       
  2806                             gen_neon_dup_low16(tmp);
       
  2807                         }
       
  2808                         tmp2 = new_tmp();
       
  2809                         tcg_gen_mov_i32(tmp2, tmp);
       
  2810                         neon_store_reg(rn, 0, tmp2);
       
  2811                         neon_store_reg(rn, 1, tmp);
       
  2812                     } else {
       
  2813                         /* VMOV */
       
  2814                         switch (size) {
       
  2815                         case 0:
       
  2816                             tmp2 = neon_load_reg(rn, pass);
       
  2817                             gen_bfi(tmp, tmp2, tmp, offset, 0xff);
       
  2818                             dead_tmp(tmp2);
       
  2819                             break;
       
  2820                         case 1:
       
  2821                             tmp2 = neon_load_reg(rn, pass);
       
  2822                             gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
       
  2823                             dead_tmp(tmp2);
       
  2824                             break;
       
  2825                         case 2:
       
  2826                             break;
       
  2827                         }
       
  2828                         neon_store_reg(rn, pass, tmp);
       
  2829                     }
       
  2830                 }
       
  2831             } else { /* !dp */
       
  2832                 if ((insn & 0x6f) != 0x00)
       
  2833                     return 1;
       
  2834                 rn = VFP_SREG_N(insn);
       
  2835                 if (insn & ARM_CP_RW_BIT) {
       
  2836                     /* vfp->arm */
       
  2837                     if (insn & (1 << 21)) {
       
  2838                         /* system register */
       
  2839                         rn >>= 1;
       
  2840 
       
  2841                         switch (rn) {
       
  2842                         case ARM_VFP_FPSID:
       
   2843                             /* VFP2 allows access to FPSID from userspace.
       
  2844                                VFP3 restricts all id registers to privileged
       
  2845                                accesses.  */
       
  2846                             if (IS_USER(s)
       
  2847                                 && arm_feature(env, ARM_FEATURE_VFP3))
       
  2848                                 return 1;
       
  2849                             tmp = load_cpu_field(vfp.xregs[rn]);
       
  2850                             break;
       
  2851                         case ARM_VFP_FPEXC:
       
  2852                             if (IS_USER(s))
       
  2853                                 return 1;
       
  2854                             tmp = load_cpu_field(vfp.xregs[rn]);
       
  2855                             break;
       
  2856                         case ARM_VFP_FPINST:
       
  2857                         case ARM_VFP_FPINST2:
       
  2858                             /* Not present in VFP3.  */
       
  2859                             if (IS_USER(s)
       
  2860                                 || arm_feature(env, ARM_FEATURE_VFP3))
       
  2861                                 return 1;
       
  2862                             tmp = load_cpu_field(vfp.xregs[rn]);
       
  2863                             break;
       
  2864                         case ARM_VFP_FPSCR:
       
  2865                             if (rd == 15) {
       
  2866                                 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
       
  2867                                 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
       
  2868                             } else {
       
  2869                                 tmp = new_tmp();
       
  2870                                 gen_helper_vfp_get_fpscr(tmp, cpu_env);
       
  2871                             }
       
  2872                             break;
       
  2873                         case ARM_VFP_MVFR0:
       
  2874                         case ARM_VFP_MVFR1:
       
  2875                             if (IS_USER(s)
       
  2876                                 || !arm_feature(env, ARM_FEATURE_VFP3))
       
  2877                                 return 1;
       
  2878                             tmp = load_cpu_field(vfp.xregs[rn]);
       
  2879                             break;
       
  2880                         default:
       
  2881                             return 1;
       
  2882                         }
       
  2883                     } else {
       
  2884                         gen_mov_F0_vreg(0, rn);
       
  2885                         tmp = gen_vfp_mrs();
       
  2886                     }
       
  2887                     if (rd == 15) {
       
  2888                         /* Set the 4 flag bits in the CPSR.  */
       
  2889                         gen_set_nzcv(tmp);
       
  2890                         dead_tmp(tmp);
       
  2891                     } else {
       
  2892                         store_reg(s, rd, tmp);
       
  2893                     }
       
  2894                 } else {
       
  2895                     /* arm->vfp */
       
  2896                     tmp = load_reg(s, rd);
       
  2897                     if (insn & (1 << 21)) {
       
  2898                         rn >>= 1;
       
  2899                         /* system register */
       
  2900                         switch (rn) {
       
  2901                         case ARM_VFP_FPSID:
       
  2902                         case ARM_VFP_MVFR0:
       
  2903                         case ARM_VFP_MVFR1:
       
  2904                             /* Writes are ignored.  */
       
  2905                             break;
       
  2906                         case ARM_VFP_FPSCR:
       
  2907                             gen_helper_vfp_set_fpscr(cpu_env, tmp);
       
  2908                             dead_tmp(tmp);
       
  2909                             gen_lookup_tb(s);
       
  2910                             break;
       
  2911                         case ARM_VFP_FPEXC:
       
  2912                             if (IS_USER(s))
       
  2913                                 return 1;
       
  2914                             store_cpu_field(tmp, vfp.xregs[rn]);
       
  2915                             gen_lookup_tb(s);
       
  2916                             break;
       
  2917                         case ARM_VFP_FPINST:
       
  2918                         case ARM_VFP_FPINST2:
       
  2919                             store_cpu_field(tmp, vfp.xregs[rn]);
       
  2920                             break;
       
  2921                         default:
       
  2922                             return 1;
       
  2923                         }
       
  2924                     } else {
       
  2925                         gen_vfp_msr(tmp);
       
  2926                         gen_mov_vreg_F0(0, rn);
       
  2927                     }
       
  2928                 }
       
  2929             }
       
  2930         } else {
       
  2931             /* data processing */
       
  2932             /* The opcode is in bits 23, 21, 20 and 6.  */
       
  2933             op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
       
  2934             if (dp) {
       
  2935                 if (op == 15) {
       
  2936                     /* rn is opcode */
       
  2937                     rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
       
  2938                 } else {
       
  2939                     /* rn is register number */
       
  2940                     VFP_DREG_N(rn, insn);
       
  2941                 }
       
  2942 
       
  2943                 if (op == 15 && (rn == 15 || rn > 17)) {
       
  2944                     /* Integer or single precision destination.  */
       
  2945                     rd = VFP_SREG_D(insn);
       
  2946                 } else {
       
  2947                     VFP_DREG_D(rd, insn);
       
  2948                 }
       
  2949 
       
  2950                 if (op == 15 && (rn == 16 || rn == 17)) {
       
  2951                     /* Integer source.  */
       
  2952                     rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
       
  2953                 } else {
       
  2954                     VFP_DREG_M(rm, insn);
       
  2955                 }
       
  2956             } else {
       
  2957                 rn = VFP_SREG_N(insn);
       
  2958                 if (op == 15 && rn == 15) {
       
  2959                     /* Double precision destination.  */
       
  2960                     VFP_DREG_D(rd, insn);
       
  2961                 } else {
       
  2962                     rd = VFP_SREG_D(insn);
       
  2963                 }
       
  2964                 rm = VFP_SREG_M(insn);
       
  2965             }
       
  2966 
       
  2967             veclen = env->vfp.vec_len;
       
  2968             if (op == 15 && rn > 3)
       
  2969                 veclen = 0;
       
  2970 
       
  2971             /* Shut up compiler warnings.  */
       
  2972             delta_m = 0;
       
  2973             delta_d = 0;
       
  2974             bank_mask = 0;
       
  2975 
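                    /* Short-vector setup (deduced from the bank arithmetic below):
                       registers are grouped into banks of 4 doubles or 8 singles.
                       A destination in bank 0 makes the operation scalar; otherwise
                       it is repeated veclen times, stepping rd/rn by delta_d and rm
                       by delta_m (delta_m == 0 meaning a fixed scalar operand),
                       wrapping within the bank.  */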
       
  2976             if (veclen > 0) {
       
  2977                 if (dp)
       
  2978                     bank_mask = 0xc;
       
  2979                 else
       
  2980                     bank_mask = 0x18;
       
  2981 
       
  2982                 /* Figure out what type of vector operation this is.  */
       
  2983                 if ((rd & bank_mask) == 0) {
       
  2984                     /* scalar */
       
  2985                     veclen = 0;
       
  2986                 } else {
       
  2987                     if (dp)
       
  2988                         delta_d = (env->vfp.vec_stride >> 1) + 1;
       
  2989                     else
       
  2990                         delta_d = env->vfp.vec_stride + 1;
       
  2991 
       
  2992                     if ((rm & bank_mask) == 0) {
       
  2993                         /* mixed scalar/vector */
       
  2994                         delta_m = 0;
       
  2995                     } else {
       
  2996                         /* vector */
       
  2997                         delta_m = delta_d;
       
  2998                     }
       
  2999                 }
       
  3000             }
       
  3001 
       
  3002             /* Load the initial operands.  */
       
  3003             if (op == 15) {
       
  3004                 switch (rn) {
       
  3005                 case 16:
       
  3006                 case 17:
       
  3007                     /* Integer source */
       
  3008                     gen_mov_F0_vreg(0, rm);
       
  3009                     break;
       
  3010                 case 8:
       
  3011                 case 9:
       
  3012                     /* Compare */
       
  3013                     gen_mov_F0_vreg(dp, rd);
       
  3014                     gen_mov_F1_vreg(dp, rm);
       
  3015                     break;
       
  3016                 case 10:
       
  3017                 case 11:
       
  3018                     /* Compare with zero */
       
  3019                     gen_mov_F0_vreg(dp, rd);
       
  3020                     gen_vfp_F1_ld0(dp);
       
  3021                     break;
       
  3022                 case 20:
       
  3023                 case 21:
       
  3024                 case 22:
       
  3025                 case 23:
       
  3026                 case 28:
       
  3027                 case 29:
       
  3028                 case 30:
       
  3029                 case 31:
       
   3030                     /* Source and destination are the same.  */
       
  3031                     gen_mov_F0_vreg(dp, rd);
       
  3032                     break;
       
  3033                 default:
       
  3034                     /* One source operand.  */
       
  3035                     gen_mov_F0_vreg(dp, rm);
       
  3036                     break;
       
  3037                 }
       
  3038             } else {
       
  3039                 /* Two source operands.  */
       
  3040                 gen_mov_F0_vreg(dp, rn);
       
  3041                 gen_mov_F1_vreg(dp, rm);
       
  3042             }
       
  3043 
       
  3044             for (;;) {
       
  3045                 /* Perform the calculation.  */
       
  3046                 switch (op) {
       
  3047                 case 0: /* mac: fd + (fn * fm) */
       
  3048                     gen_vfp_mul(dp);
       
  3049                     gen_mov_F1_vreg(dp, rd);
       
  3050                     gen_vfp_add(dp);
       
  3051                     break;
       
  3052                 case 1: /* nmac: fd - (fn * fm) */
       
  3053                     gen_vfp_mul(dp);
       
  3054                     gen_vfp_neg(dp);
       
  3055                     gen_mov_F1_vreg(dp, rd);
       
  3056                     gen_vfp_add(dp);
       
  3057                     break;
       
  3058                 case 2: /* msc: -fd + (fn * fm) */
       
  3059                     gen_vfp_mul(dp);
       
  3060                     gen_mov_F1_vreg(dp, rd);
       
  3061                     gen_vfp_sub(dp);
       
  3062                     break;
       
  3063                 case 3: /* nmsc: -fd - (fn * fm)  */
       
  3064                     gen_vfp_mul(dp);
       
  3065                     gen_vfp_neg(dp);
       
  3066                     gen_mov_F1_vreg(dp, rd);
       
  3067                     gen_vfp_sub(dp);
       
  3068                     break;
       
  3069                 case 4: /* mul: fn * fm */
       
  3070                     gen_vfp_mul(dp);
       
  3071                     break;
       
  3072                 case 5: /* nmul: -(fn * fm) */
       
  3073                     gen_vfp_mul(dp);
       
  3074                     gen_vfp_neg(dp);
       
  3075                     break;
       
  3076                 case 6: /* add: fn + fm */
       
  3077                     gen_vfp_add(dp);
       
  3078                     break;
       
  3079                 case 7: /* sub: fn - fm */
       
  3080                     gen_vfp_sub(dp);
       
  3081                     break;
       
  3082                 case 8: /* div: fn / fm */
       
  3083                     gen_vfp_div(dp);
       
  3084                     break;
       
  3085                 case 14: /* fconst */
       
  3086                     if (!arm_feature(env, ARM_FEATURE_VFP3))
       
  3087                       return 1;
       
  3088 
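                            /* VFP3 fconst: the 8 immediate bits insn[19:16]:insn[3:0]
                               are expanded below into a full constant - insn[19]
                               supplies the sign, insn[18] selects the exponent
                               pattern, and the low bits form the top of the
                               fraction.  */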
       
  3089                     n = (insn << 12) & 0x80000000;
       
  3090                     i = ((insn >> 12) & 0x70) | (insn & 0xf);
       
  3091                     if (dp) {
       
  3092                         if (i & 0x40)
       
  3093                             i |= 0x3f80;
       
  3094                         else
       
  3095                             i |= 0x4000;
       
  3096                         n |= i << 16;
       
  3097                         tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
       
  3098                     } else {
       
  3099                         if (i & 0x40)
       
  3100                             i |= 0x780;
       
  3101                         else
       
  3102                             i |= 0x800;
       
  3103                         n |= i << 19;
       
  3104                         tcg_gen_movi_i32(cpu_F0s, n);
       
  3105                     }
       
  3106                     break;
       
  3107                 case 15: /* extension space */
       
  3108                     switch (rn) {
       
  3109                     case 0: /* cpy */
       
  3110                         /* no-op */
       
  3111                         break;
       
  3112                     case 1: /* abs */
       
  3113                         gen_vfp_abs(dp);
       
  3114                         break;
       
  3115                     case 2: /* neg */
       
  3116                         gen_vfp_neg(dp);
       
  3117                         break;
       
  3118                     case 3: /* sqrt */
       
  3119                         gen_vfp_sqrt(dp);
       
  3120                         break;
       
  3121                     case 4: /* vcvtb.f32.f16 */
       
  3122                         tmp = gen_vfp_mrs();
       
  3123                         tcg_gen_ext16u_i32(tmp, tmp);
       
  3124                         gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
       
  3125                         dead_tmp(tmp);
       
  3126                         break;
       
  3127                     case 5: /* vcvtt.f32.f16 */
       
  3128                         tmp = gen_vfp_mrs();
       
  3129                         tcg_gen_shri_i32(tmp, tmp, 16);
       
  3130                         gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
       
  3131                         dead_tmp(tmp);
       
  3132                         break;
       
  3133                     case 6: /* vcvtb.f16.f32 */
       
  3134                         tmp = new_tmp();
       
  3135                         gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
       
  3136                         gen_mov_F0_vreg(0, rd);
       
  3137                         tmp2 = gen_vfp_mrs();
       
  3138                         tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
       
  3139                         tcg_gen_or_i32(tmp, tmp, tmp2);
       
  3140                         dead_tmp(tmp2);
       
  3141                         gen_vfp_msr(tmp);
       
  3142                         break;
       
  3143                     case 7: /* vcvtt.f16.f32 */
       
  3144                         tmp = new_tmp();
       
  3145                         gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
       
  3146                         tcg_gen_shli_i32(tmp, tmp, 16);
       
  3147                         gen_mov_F0_vreg(0, rd);
       
  3148                         tmp2 = gen_vfp_mrs();
       
  3149                         tcg_gen_ext16u_i32(tmp2, tmp2);
       
  3150                         tcg_gen_or_i32(tmp, tmp, tmp2);
       
  3151                         dead_tmp(tmp2);
       
  3152                         gen_vfp_msr(tmp);
       
  3153                         break;
       
  3154                     case 8: /* cmp */
       
  3155                         gen_vfp_cmp(dp);
       
  3156                         break;
       
  3157                     case 9: /* cmpe */
       
  3158                         gen_vfp_cmpe(dp);
       
  3159                         break;
       
  3160                     case 10: /* cmpz */
       
  3161                         gen_vfp_cmp(dp);
       
  3162                         break;
       
  3163                     case 11: /* cmpez */
       
  3164                         gen_vfp_F1_ld0(dp);
       
  3165                         gen_vfp_cmpe(dp);
       
  3166                         break;
       
  3167                     case 15: /* single<->double conversion */
       
  3168                         if (dp)
       
  3169                             gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
       
  3170                         else
       
  3171                             gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
       
  3172                         break;
       
  3173                     case 16: /* fuito */
       
  3174                         gen_vfp_uito(dp);
       
  3175                         break;
       
  3176                     case 17: /* fsito */
       
  3177                         gen_vfp_sito(dp);
       
  3178                         break;
       
  3179                     case 20: /* fshto */
       
  3180                         if (!arm_feature(env, ARM_FEATURE_VFP3))
       
  3181                           return 1;
       
  3182                         gen_vfp_shto(dp, 16 - rm);
       
  3183                         break;
       
  3184                     case 21: /* fslto */
       
  3185                         if (!arm_feature(env, ARM_FEATURE_VFP3))
       
  3186                           return 1;
       
  3187                         gen_vfp_slto(dp, 32 - rm);
       
  3188                         break;
       
  3189                     case 22: /* fuhto */
       
  3190                         if (!arm_feature(env, ARM_FEATURE_VFP3))
       
  3191                           return 1;
       
  3192                         gen_vfp_uhto(dp, 16 - rm);
       
  3193                         break;
       
  3194                     case 23: /* fulto */
       
  3195                         if (!arm_feature(env, ARM_FEATURE_VFP3))
       
  3196                           return 1;
       
  3197                         gen_vfp_ulto(dp, 32 - rm);
       
  3198                         break;
       
  3199                     case 24: /* ftoui */
       
  3200                         gen_vfp_toui(dp);
       
  3201                         break;
       
  3202                     case 25: /* ftouiz */
       
  3203                         gen_vfp_touiz(dp);
       
  3204                         break;
       
  3205                     case 26: /* ftosi */
       
  3206                         gen_vfp_tosi(dp);
       
  3207                         break;
       
  3208                     case 27: /* ftosiz */
       
  3209                         gen_vfp_tosiz(dp);
       
  3210                         break;
       
  3211                     case 28: /* ftosh */
       
  3212                         if (!arm_feature(env, ARM_FEATURE_VFP3))
       
  3213                           return 1;
       
  3214                         gen_vfp_tosh(dp, 16 - rm);
       
  3215                         break;
       
  3216                     case 29: /* ftosl */
       
  3217                         if (!arm_feature(env, ARM_FEATURE_VFP3))
       
  3218                           return 1;
       
  3219                         gen_vfp_tosl(dp, 32 - rm);
       
  3220                         break;
       
  3221                     case 30: /* ftouh */
       
  3222                         if (!arm_feature(env, ARM_FEATURE_VFP3))
       
  3223                           return 1;
       
  3224                         gen_vfp_touh(dp, 16 - rm);
       
  3225                         break;
       
  3226                     case 31: /* ftoul */
       
  3227                         if (!arm_feature(env, ARM_FEATURE_VFP3))
       
  3228                           return 1;
       
  3229                         gen_vfp_toul(dp, 32 - rm);
       
  3230                         break;
       
  3231                     default: /* undefined */
       
  3232                         printf ("rn:%d\n", rn);
       
  3233                         return 1;
       
  3234                     }
       
  3235                     break;
       
  3236                 default: /* undefined */
       
  3237                     printf ("op:%d\n", op);
       
  3238                     return 1;
       
  3239                 }
       
  3240 
       
  3241                 /* Write back the result.  */
       
  3242                 if (op == 15 && (rn >= 8 && rn <= 11))
       
  3243                     ; /* Comparison, do nothing.  */
       
  3244                 else if (op == 15 && rn > 17)
       
  3245                     /* Integer result.  */
       
  3246                     gen_mov_vreg_F0(0, rd);
       
  3247                 else if (op == 15 && rn == 15)
       
  3248                     /* conversion */
       
  3249                     gen_mov_vreg_F0(!dp, rd);
       
  3250                 else
       
  3251                     gen_mov_vreg_F0(dp, rd);
       
  3252 
       
   3253                 /* Break out of the loop if we have finished.  */
       
  3254                 if (veclen == 0)
       
  3255                     break;
       
  3256 
       
  3257                 if (op == 15 && delta_m == 0) {
       
  3258                     /* single source one-many */
       
  3259                     while (veclen--) {
       
  3260                         rd = ((rd + delta_d) & (bank_mask - 1))
       
  3261                              | (rd & bank_mask);
       
  3262                         gen_mov_vreg_F0(dp, rd);
       
  3263                     }
       
  3264                     break;
       
  3265                 }
       
   3266                 /* Set up the next operands.  */
       
  3267                 veclen--;
       
  3268                 rd = ((rd + delta_d) & (bank_mask - 1))
       
  3269                      | (rd & bank_mask);
       
  3270 
       
  3271                 if (op == 15) {
       
  3272                     /* One source operand.  */
       
  3273                     rm = ((rm + delta_m) & (bank_mask - 1))
       
  3274                          | (rm & bank_mask);
       
  3275                     gen_mov_F0_vreg(dp, rm);
       
  3276                 } else {
       
  3277                     /* Two source operands.  */
       
  3278                     rn = ((rn + delta_d) & (bank_mask - 1))
       
  3279                          | (rn & bank_mask);
       
  3280                     gen_mov_F0_vreg(dp, rn);
       
  3281                     if (delta_m) {
       
  3282                         rm = ((rm + delta_m) & (bank_mask - 1))
       
  3283                              | (rm & bank_mask);
       
  3284                         gen_mov_F1_vreg(dp, rm);
       
  3285                     }
       
  3286                 }
       
  3287             }
       
  3288         }
       
  3289         break;
       
  3290     case 0xc:
       
  3291     case 0xd:
       
  3292         if (dp && (insn & 0x03e00000) == 0x00400000) {
       
  3293             /* two-register transfer */
       
  3294             rn = (insn >> 16) & 0xf;
       
  3295             rd = (insn >> 12) & 0xf;
       
  3296             if (dp) {
       
  3297                 VFP_DREG_M(rm, insn);
       
  3298             } else {
       
  3299                 rm = VFP_SREG_M(insn);
       
  3300             }
       
  3301 
       
  3302             if (insn & ARM_CP_RW_BIT) {
       
  3303                 /* vfp->arm */
       
  3304                 if (dp) {
       
  3305                     gen_mov_F0_vreg(0, rm * 2);
       
  3306                     tmp = gen_vfp_mrs();
       
  3307                     store_reg(s, rd, tmp);
       
  3308                     gen_mov_F0_vreg(0, rm * 2 + 1);
       
  3309                     tmp = gen_vfp_mrs();
       
  3310                     store_reg(s, rn, tmp);
       
  3311                 } else {
       
  3312                     gen_mov_F0_vreg(0, rm);
       
  3313                     tmp = gen_vfp_mrs();
       
  3314                     store_reg(s, rn, tmp);
       
  3315                     gen_mov_F0_vreg(0, rm + 1);
       
  3316                     tmp = gen_vfp_mrs();
       
  3317                     store_reg(s, rd, tmp);
       
  3318                 }
       
  3319             } else {
       
  3320                 /* arm->vfp */
       
  3321                 if (dp) {
       
  3322                     tmp = load_reg(s, rd);
       
  3323                     gen_vfp_msr(tmp);
       
  3324                     gen_mov_vreg_F0(0, rm * 2);
       
  3325                     tmp = load_reg(s, rn);
       
  3326                     gen_vfp_msr(tmp);
       
  3327                     gen_mov_vreg_F0(0, rm * 2 + 1);
       
  3328                 } else {
       
  3329                     tmp = load_reg(s, rn);
       
  3330                     gen_vfp_msr(tmp);
       
  3331                     gen_mov_vreg_F0(0, rm);
       
  3332                     tmp = load_reg(s, rd);
       
  3333                     gen_vfp_msr(tmp);
       
  3334                     gen_mov_vreg_F0(0, rm + 1);
       
  3335                 }
       
  3336             }
       
  3337         } else {
       
  3338             /* Load/store */
       
  3339             rn = (insn >> 16) & 0xf;
       
  3340             if (dp)
       
  3341                 VFP_DREG_D(rd, insn);
       
  3342             else
       
  3343                 rd = VFP_SREG_D(insn);
       
  3344             if (s->thumb && rn == 15) {
       
  3345                 gen_op_movl_T1_im(s->pc & ~2);
       
  3346             } else {
       
  3347                 gen_movl_T1_reg(s, rn);
       
  3348             }
       
  3349             if ((insn & 0x01200000) == 0x01000000) {
       
  3350                 /* Single load/store */
       
  3351                 offset = (insn & 0xff) << 2;
       
  3352                 if ((insn & (1 << 23)) == 0)
       
  3353                     offset = -offset;
       
  3354                 gen_op_addl_T1_im(offset);
       
  3355                 if (insn & (1 << 20)) {
       
  3356                     gen_vfp_ld(s, dp);
       
  3357                     gen_mov_vreg_F0(dp, rd);
       
  3358                 } else {
       
  3359                     gen_mov_F0_vreg(dp, rd);
       
  3360                     gen_vfp_st(s, dp);
       
  3361                 }
       
  3362             } else {
       
  3363                 /* load/store multiple */
       
  3364                 if (dp)
       
  3365                     n = (insn >> 1) & 0x7f;
       
  3366                 else
       
  3367                     n = insn & 0xff;
       
  3368 
       
  3369                 if (insn & (1 << 24)) /* pre-decrement */
       
  3370                     gen_op_addl_T1_im(-((insn & 0xff) << 2));
       
  3371 
       
  3372                 if (dp)
       
  3373                     offset = 8;
       
  3374                 else
       
  3375                     offset = 4;
       
  3376                 for (i = 0; i < n; i++) {
       
  3377                     if (insn & ARM_CP_RW_BIT) {
       
  3378                         /* load */
       
  3379                         gen_vfp_ld(s, dp);
       
  3380                         gen_mov_vreg_F0(dp, rd + i);
       
  3381                     } else {
       
  3382                         /* store */
       
  3383                         gen_mov_F0_vreg(dp, rd + i);
       
  3384                         gen_vfp_st(s, dp);
       
  3385                     }
       
  3386                     gen_op_addl_T1_im(offset);
       
  3387                 }
       
  3388                 if (insn & (1 << 21)) {
       
  3389                     /* writeback */
       
  3390                     if (insn & (1 << 24))
       
  3391                         offset = -offset * n;
       
  3392                     else if (dp && (insn & 1))
       
  3393                         offset = 4;
       
  3394                     else
       
  3395                         offset = 0;
       
  3396 
       
  3397                     if (offset != 0)
       
  3398                         gen_op_addl_T1_im(offset);
       
  3399                     gen_movl_reg_T1(s, rn);
       
  3400                 }
       
  3401             }
       
  3402         }
       
  3403         break;
       
  3404     default:
       
  3405         /* Should never happen.  */
       
  3406         return 1;
       
  3407     }
       
  3408     return 0;
       
  3409 }
       
  3410 
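        /* Chain directly to the next TB only when the destination lies on the
           same guest page as this TB; otherwise exit to the main loop so the
           target is looked up again.  */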
       
  3411 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
       
  3412 {
       
  3413     TranslationBlock *tb;
       
  3414 
       
  3415     tb = s->tb;
       
  3416     if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
       
  3417         tcg_gen_goto_tb(n);
       
  3418         gen_set_pc_im(dest);
       
  3419         tcg_gen_exit_tb((long)tb + n);
       
  3420     } else {
       
  3421         gen_set_pc_im(dest);
       
  3422         tcg_gen_exit_tb(0);
       
  3423     }
       
  3424 }
       
  3425 
       
  3426 static inline void gen_jmp (DisasContext *s, uint32_t dest)
       
  3427 {
       
  3428     if (unlikely(s->singlestep_enabled)) {
       
  3429         /* An indirect jump so that we still trigger the debug exception.  */
       
  3430         if (s->thumb)
       
  3431             dest |= 1;
       
  3432         gen_bx_im(s, dest);
       
  3433     } else {
       
  3434         gen_goto_tb(s, 0, dest);
       
  3435         s->is_jmp = DISAS_TB_JUMP;
       
  3436     }
       
  3437 }
       
  3438 
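        /* Sign-extend the selected half (x/y: 0 = bottom, 1 = top) of each
           operand and multiply - the 16x16->32 step used by the signed
           multiply operations.  */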
       
  3439 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
       
  3440 {
       
  3441     if (x)
       
  3442         tcg_gen_sari_i32(t0, t0, 16);
       
  3443     else
       
  3444         gen_sxth(t0);
       
  3445     if (y)
       
  3446         tcg_gen_sari_i32(t1, t1, 16);
       
  3447     else
       
  3448         gen_sxth(t1);
       
  3449     tcg_gen_mul_i32(t0, t0, t1);
       
  3450 }
       
  3451 
       
  3452 /* Return the mask of PSR bits set by a MSR instruction.  */
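        /* "flags" is the instruction's 4-bit field mask: bit 0 selects PSR[7:0]
           (c), bit 1 PSR[15:8] (x), bit 2 PSR[23:16] (s), bit 3 PSR[31:24] (f).  */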
       
  3453 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
       
  3454     uint32_t mask;
       
  3455 
       
  3456     mask = 0;
       
  3457     if (flags & (1 << 0))
       
  3458         mask |= 0xff;
       
  3459     if (flags & (1 << 1))
       
  3460         mask |= 0xff00;
       
  3461     if (flags & (1 << 2))
       
  3462         mask |= 0xff0000;
       
  3463     if (flags & (1 << 3))
       
  3464         mask |= 0xff000000;
       
  3465 
       
  3466     /* Mask out undefined bits.  */
       
  3467     mask &= ~CPSR_RESERVED;
       
  3468     if (!arm_feature(env, ARM_FEATURE_V6))
       
  3469         mask &= ~(CPSR_E | CPSR_GE);
       
  3470     if (!arm_feature(env, ARM_FEATURE_THUMB2))
       
  3471         mask &= ~CPSR_IT;
       
  3472     /* Mask out execution state bits.  */
       
  3473     if (!spsr)
       
  3474         mask &= ~CPSR_EXEC;
       
  3475     /* Mask out privileged bits.  */
       
  3476     if (IS_USER(s))
       
  3477         mask &= CPSR_USER;
       
  3478     return mask;
       
  3479 }
       
  3480 
       
  3481 /* Returns nonzero if access to the PSR is not permitted.  */
       
  3482 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
       
  3483 {
       
  3484     TCGv tmp;
       
  3485     if (spsr) {
       
  3486         /* ??? This is also undefined in system mode.  */
       
  3487         if (IS_USER(s))
       
  3488             return 1;
       
  3489 
       
  3490         tmp = load_cpu_field(spsr);
       
  3491         tcg_gen_andi_i32(tmp, tmp, ~mask);
       
  3492         tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
       
  3493         tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
       
  3494         store_cpu_field(tmp, spsr);
       
  3495     } else {
       
  3496         gen_set_cpsr(cpu_T[0], mask);
       
  3497     }
       
  3498     gen_lookup_tb(s);
       
  3499     return 0;
       
  3500 }
       
  3501 
       
  3502 /* Generate an old-style exception return.  */
       
  3503 static void gen_exception_return(DisasContext *s)
       
  3504 {
       
  3505     TCGv tmp;
       
  3506     gen_movl_reg_T0(s, 15);
       
  3507     tmp = load_cpu_field(spsr);
       
  3508     gen_set_cpsr(tmp, 0xffffffff);
       
  3509     dead_tmp(tmp);
       
  3510     s->is_jmp = DISAS_UPDATE;
       
  3511 }
       
  3512 
       
  3513 /* Generate a v6 exception return.  Marks both values as dead.  */
       
  3514 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
       
  3515 {
       
  3516     gen_set_cpsr(cpsr, 0xffffffff);
       
  3517     dead_tmp(cpsr);
       
  3518     store_reg(s, 15, pc);
       
  3519     s->is_jmp = DISAS_UPDATE;
       
  3520 }
       
  3521 
       
  3522 static inline void
       
  3523 gen_set_condexec (DisasContext *s)
       
  3524 {
       
  3525     if (s->condexec_mask) {
       
  3526         uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
       
  3527         TCGv tmp = new_tmp();
       
  3528         tcg_gen_movi_i32(tmp, val);
       
  3529         store_cpu_field(tmp, condexec_bits);
       
  3530     }
       
  3531 }
       
  3532 
       
  3533 static void gen_nop_hint(DisasContext *s, int val)
       
  3534 {
       
  3535     switch (val) {
       
  3536     case 3: /* wfi */
       
  3537         gen_set_pc_im(s->pc);
       
  3538         s->is_jmp = DISAS_WFI;
       
  3539         break;
       
  3540     case 2: /* wfe */
       
  3541     case 4: /* sev */
       
  3542         /* TODO: Implement SEV and WFE.  May help SMP performance.  */
       
  3543     default: /* nop */
       
  3544         break;
       
  3545     }
       
  3546 }
       
  3547 
       
  3548 /* These macros help make the code more readable when migrating from the
       
  3549    old dyngen helpers.  They should probably be removed when
       
  3550    T0/T1 are removed.  */
       
  3551 #define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
       
  3552 #define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
       
  3553 
       
  3554 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
       
  3555 
       
  3556 static inline int gen_neon_add(int size)
       
  3557 {
       
  3558     switch (size) {
       
  3559     case 0: gen_helper_neon_add_u8(CPU_T001); break;
       
  3560     case 1: gen_helper_neon_add_u16(CPU_T001); break;
       
  3561     case 2: gen_op_addl_T0_T1(); break;
       
  3562     default: return 1;
       
  3563     }
       
  3564     return 0;
       
  3565 }
       
  3566 
       
  3567 static inline void gen_neon_rsb(int size)
       
  3568 {
       
  3569     switch (size) {
       
  3570     case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
       
  3571     case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
       
  3572     case 2: gen_op_rsbl_T0_T1(); break;
       
  3573     default: return;
       
  3574     }
       
  3575 }
       
  3576 
       
  3577 /* 32-bit pairwise ops end up the same as the elementwise versions.  */
       
  3578 #define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
       
  3579 #define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
       
  3580 #define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
       
  3581 #define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
       
  3582 
       
  3583 /* FIXME: This is wrong.  They set the wrong overflow bit.  */
       
  3584 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
       
  3585 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
       
  3586 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
       
  3587 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
       
  3588 
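        /* Dispatch a NEON integer helper on (size << 1) | u: size 0/1/2 picks
           the 8/16/32-bit variant and u the unsigned form; anything else
           (size == 3) returns 1 as undefined.  Both "size" and "u" must exist
           in the caller's scope.  */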
       
  3589 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
       
  3590     switch ((size << 1) | u) { \
       
  3591     case 0: \
       
  3592         gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
       
  3593         break; \
       
  3594     case 1: \
       
  3595         gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
       
  3596         break; \
       
  3597     case 2: \
       
  3598         gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
       
  3599         break; \
       
  3600     case 3: \
       
  3601         gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
       
  3602         break; \
       
  3603     case 4: \
       
  3604         gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
       
  3605         break; \
       
  3606     case 5: \
       
  3607         gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
       
  3608         break; \
       
  3609     default: return 1; \
       
  3610     }} while (0)
       
  3611 
       
  3612 #define GEN_NEON_INTEGER_OP(name) do { \
       
  3613     switch ((size << 1) | u) { \
       
  3614     case 0: \
       
  3615         gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
       
  3616         break; \
       
  3617     case 1: \
       
  3618         gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
       
  3619         break; \
       
  3620     case 2: \
       
  3621         gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
       
  3622         break; \
       
  3623     case 3: \
       
  3624         gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
       
  3625         break; \
       
  3626     case 4: \
       
  3627         gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
       
  3628         break; \
       
  3629     case 5: \
       
  3630         gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
       
  3631         break; \
       
  3632     default: return 1; \
       
  3633     }} while (0)
       
  3634 
       
  3635 static inline void
       
  3636 gen_neon_movl_scratch_T0(int scratch)
       
  3637 {
       
  3638   uint32_t offset;
       
  3639 
       
  3640   offset = offsetof(CPUARMState, vfp.scratch[scratch]);
       
  3641   tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
       
  3642 }
       
  3643 
       
  3644 static inline void
       
  3645 gen_neon_movl_scratch_T1(int scratch)
       
  3646 {
       
  3647   uint32_t offset;
       
  3648 
       
  3649   offset = offsetof(CPUARMState, vfp.scratch[scratch]);
       
  3650   tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
       
  3651 }
       
  3652 
       
  3653 static inline void
       
  3654 gen_neon_movl_T0_scratch(int scratch)
       
  3655 {
       
  3656   uint32_t offset;
       
  3657 
       
  3658   offset = offsetof(CPUARMState, vfp.scratch[scratch]);
       
  3659   tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
       
  3660 }
       
  3661 
       
  3662 static inline void
       
  3663 gen_neon_movl_T1_scratch(int scratch)
       
  3664 {
       
  3665   uint32_t offset;
       
  3666 
       
  3667   offset = offsetof(CPUARMState, vfp.scratch[scratch]);
       
  3668   tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
       
  3669 }
       
  3670 
       
  3671 static inline void gen_neon_get_scalar(int size, int reg)
       
  3672 {
       
  3673     if (size == 1) {
       
  3674         NEON_GET_REG(T0, reg >> 1, reg & 1);
       
  3675     } else {
       
  3676         NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
       
  3677         if (reg & 1)
       
  3678             gen_neon_dup_low16(cpu_T[0]);
       
  3679         else
       
  3680             gen_neon_dup_high16(cpu_T[0]);
       
  3681     }
       
  3682 }
       
  3683 
       
  3684 static void gen_neon_unzip(int reg, int q, int tmp, int size)
       
  3685 {
       
  3686     int n;
       
  3687 
       
  3688     for (n = 0; n < q + 1; n += 2) {
       
  3689         NEON_GET_REG(T0, reg, n);
       
   3690         NEON_GET_REG(T1, reg, n + 1);
       
  3691         switch (size) {
       
  3692         case 0: gen_helper_neon_unzip_u8(); break;
       
  3693         case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same.  */
       
  3694         case 2: /* no-op */; break;
       
  3695         default: abort();
       
  3696         }
       
  3697         gen_neon_movl_scratch_T0(tmp + n);
       
  3698         gen_neon_movl_scratch_T1(tmp + n + 1);
       
  3699     }
       
  3700 }
       
  3701 
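        /* Register-list shapes for the NEON "load/store multiple structures"
           forms, indexed by the op field (insn[11:8]) in disas_neon_ls_insn
           below.  */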
       
  3702 static struct {
       
  3703     int nregs;
       
  3704     int interleave;
       
  3705     int spacing;
       
  3706 } neon_ls_element_type[11] = {
       
  3707     {4, 4, 1},
       
  3708     {4, 4, 2},
       
  3709     {4, 1, 1},
       
  3710     {4, 2, 1},
       
  3711     {3, 3, 1},
       
  3712     {3, 3, 2},
       
  3713     {3, 1, 1},
       
  3714     {1, 1, 1},
       
  3715     {2, 2, 1},
       
  3716     {2, 2, 2},
       
  3717     {2, 1, 1}
       
  3718 };
       
  3719 
       
  3720 /* Translate a NEON load/store element instruction.  Return nonzero if the
       
  3721    instruction is invalid.  */
       
  3722 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
       
  3723 {
       
  3724     int rd, rn, rm;
       
  3725     int op;
       
  3726     int nregs;
       
  3727     int interleave;
       
  3728     int stride;
       
  3729     int size;
       
  3730     int reg;
       
  3731     int pass;
       
  3732     int load;
       
  3733     int shift;
       
  3734     int n;
       
  3735     TCGv tmp;
       
  3736     TCGv tmp2;
       
  3737 
       
  3738     if (!vfp_enabled(env))
       
  3739       return 1;
       
  3740     /* FIXME: Honour alignment specifiers.  */
       
  3741     VFP_DREG_D(rd, insn);
       
  3742     rn = (insn >> 16) & 0xf;
       
  3743     rm = insn & 0xf;
       
  3744     load = (insn & (1 << 21)) != 0;
       
  3745     if ((insn & (1 << 23)) == 0) {
       
   3746         /* Load/store all elements.  */
       
  3747         op = (insn >> 8) & 0xf;
       
  3748         size = (insn >> 6) & 3;
       
  3749         if (op > 10 || size == 3)
       
  3750             return 1;
       
  3751         nregs = neon_ls_element_type[op].nregs;
       
  3752         interleave = neon_ls_element_type[op].interleave;
       
  3753         gen_movl_T1_reg(s, rn);
       
  3754         stride = (1 << size) * interleave;
       
  3755         for (reg = 0; reg < nregs; reg++) {
       
  3756             if (interleave > 2 || (interleave == 2 && nregs == 2)) {
       
  3757                 gen_movl_T1_reg(s, rn);
       
  3758                 gen_op_addl_T1_im((1 << size) * reg);
       
  3759             } else if (interleave == 2 && nregs == 4 && reg == 2) {
       
  3760                 gen_movl_T1_reg(s, rn);
       
  3761                 gen_op_addl_T1_im(1 << size);
       
  3762             }
       
  3763             for (pass = 0; pass < 2; pass++) {
       
  3764                 if (size == 2) {
       
  3765                     if (load) {
       
  3766                         tmp = gen_ld32(cpu_T[1], IS_USER(s));
       
  3767                         neon_store_reg(rd, pass, tmp);
       
  3768                     } else {
       
  3769                         tmp = neon_load_reg(rd, pass);
       
  3770                         gen_st32(tmp, cpu_T[1], IS_USER(s));
       
  3771                     }
       
  3772                     gen_op_addl_T1_im(stride);
       
  3773                 } else if (size == 1) {
       
  3774                     if (load) {
       
  3775                         tmp = gen_ld16u(cpu_T[1], IS_USER(s));
       
  3776                         gen_op_addl_T1_im(stride);
       
  3777                         tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
       
  3778                         gen_op_addl_T1_im(stride);
       
  3779                         tcg_gen_shli_i32(tmp2, tmp, 16);
       
  3780                         tcg_gen_or_i32(tmp, tmp, tmp2);
       
  3781                         dead_tmp(tmp2);
       
  3782                         neon_store_reg(rd, pass, tmp);
       
  3783                     } else {
       
  3784                         tmp = neon_load_reg(rd, pass);
       
  3785                         tmp2 = new_tmp();
       
  3786                         tcg_gen_shri_i32(tmp2, tmp, 16);
       
  3787                         gen_st16(tmp, cpu_T[1], IS_USER(s));
       
  3788                         gen_op_addl_T1_im(stride);
       
  3789                         gen_st16(tmp2, cpu_T[1], IS_USER(s));
       
  3790                         gen_op_addl_T1_im(stride);
       
  3791                     }
       
  3792                 } else /* size == 0 */ {
       
  3793                     if (load) {
       
  3794                         TCGV_UNUSED(tmp2);
       
  3795                         for (n = 0; n < 4; n++) {
       
  3796                             tmp = gen_ld8u(cpu_T[1], IS_USER(s));
       
  3797                             gen_op_addl_T1_im(stride);
       
  3798                             if (n == 0) {
       
  3799                                 tmp2 = tmp;
       
  3800                             } else {
       
  3801                                 tcg_gen_shli_i32(tmp, tmp, n * 8);
       
  3802                                 tcg_gen_or_i32(tmp2, tmp2, tmp);
       
  3803                                 dead_tmp(tmp);
       
  3804                             }
       
  3805                         }
       
  3806                         neon_store_reg(rd, pass, tmp2);
       
  3807                     } else {
       
  3808                         tmp2 = neon_load_reg(rd, pass);
       
  3809                         for (n = 0; n < 4; n++) {
       
  3810                             tmp = new_tmp();
       
  3811                             if (n == 0) {
       
  3812                                 tcg_gen_mov_i32(tmp, tmp2);
       
  3813                             } else {
       
  3814                                 tcg_gen_shri_i32(tmp, tmp2, n * 8);
       
  3815                             }
       
  3816                             gen_st8(tmp, cpu_T[1], IS_USER(s));
       
  3817                             gen_op_addl_T1_im(stride);
       
  3818                         }
       
  3819                         dead_tmp(tmp2);
       
  3820                     }
       
  3821                 }
       
  3822             }
       
  3823             rd += neon_ls_element_type[op].spacing;
       
  3824         }
       
  3825         stride = nregs * 8;
       
  3826     } else {
       
  3827         size = (insn >> 10) & 3;
       
  3828         if (size == 3) {
       
  3829             /* Load single element to all lanes.  */
       
  3830             if (!load)
       
  3831                 return 1;
       
  3832             size = (insn >> 6) & 3;
       
  3833             nregs = ((insn >> 8) & 3) + 1;
       
  3834             stride = (insn & (1 << 5)) ? 2 : 1;
       
  3835             gen_movl_T1_reg(s, rn);
       
  3836             for (reg = 0; reg < nregs; reg++) {
       
  3837                 switch (size) {
       
  3838                 case 0:
       
  3839                     tmp = gen_ld8u(cpu_T[1], IS_USER(s));
       
  3840                     gen_neon_dup_u8(tmp, 0);
       
  3841                     break;
       
  3842                 case 1:
       
  3843                     tmp = gen_ld16u(cpu_T[1], IS_USER(s));
       
  3844                     gen_neon_dup_low16(tmp);
       
  3845                     break;
       
  3846                 case 2:
       
  3847                     tmp = gen_ld32(cpu_T[1], IS_USER(s));
       
  3848                     break;
       
  3849                 case 3:
       
  3850                     return 1;
       
  3851                 default: /* Avoid compiler warnings.  */
       
  3852                     abort();
       
  3853                 }
       
  3854                 gen_op_addl_T1_im(1 << size);
       
  3855                 tmp2 = new_tmp();
       
  3856                 tcg_gen_mov_i32(tmp2, tmp);
       
  3857                 neon_store_reg(rd, 0, tmp2);
       
  3858                 neon_store_reg(rd, 1, tmp);
       
  3859                 rd += stride;
       
  3860             }
       
  3861             stride = (1 << size) * nregs;
       
  3862         } else {
       
  3863             /* Single element.  */
       
  3864             pass = (insn >> 7) & 1;
       
  3865             switch (size) {
       
  3866             case 0:
       
  3867                 shift = ((insn >> 5) & 3) * 8;
       
  3868                 stride = 1;
       
  3869                 break;
       
  3870             case 1:
       
  3871                 shift = ((insn >> 6) & 1) * 16;
       
  3872                 stride = (insn & (1 << 5)) ? 2 : 1;
       
  3873                 break;
       
  3874             case 2:
       
  3875                 shift = 0;
       
  3876                 stride = (insn & (1 << 6)) ? 2 : 1;
       
  3877                 break;
       
  3878             default:
       
  3879                 abort();
       
  3880             }
       
  3881             nregs = ((insn >> 8) & 3) + 1;
       
  3882             gen_movl_T1_reg(s, rn);
       
  3883             for (reg = 0; reg < nregs; reg++) {
       
  3884                 if (load) {
       
  3885                     switch (size) {
       
  3886                     case 0:
       
  3887                         tmp = gen_ld8u(cpu_T[1], IS_USER(s));
       
  3888                         break;
       
  3889                     case 1:
       
  3890                         tmp = gen_ld16u(cpu_T[1], IS_USER(s));
       
  3891                         break;
       
  3892                     case 2:
       
  3893                         tmp = gen_ld32(cpu_T[1], IS_USER(s));
       
  3894                         break;
       
  3895                     default: /* Avoid compiler warnings.  */
       
  3896                         abort();
       
  3897                     }
       
  3898                     if (size != 2) {
       
  3899                         tmp2 = neon_load_reg(rd, pass);
       
  3900                         gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
       
  3901                         dead_tmp(tmp2);
       
  3902                     }
       
  3903                     neon_store_reg(rd, pass, tmp);
       
  3904                 } else { /* Store */
       
  3905                     tmp = neon_load_reg(rd, pass);
       
  3906                     if (shift)
       
  3907                         tcg_gen_shri_i32(tmp, tmp, shift);
       
  3908                     switch (size) {
       
  3909                     case 0:
       
  3910                         gen_st8(tmp, cpu_T[1], IS_USER(s));
       
  3911                         break;
       
  3912                     case 1:
       
  3913                         gen_st16(tmp, cpu_T[1], IS_USER(s));
       
  3914                         break;
       
  3915                     case 2:
       
  3916                         gen_st32(tmp, cpu_T[1], IS_USER(s));
       
  3917                         break;
       
  3918                     }
       
  3919                 }
       
  3920                 rd += stride;
       
  3921                 gen_op_addl_T1_im(1 << size);
       
  3922             }
       
  3923             stride = nregs * (1 << size);
       
  3924         }
       
  3925     }
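           /* Base register writeback: rm == 15 means no writeback, rm == 13
              post-indexes rn by the transfer size computed in 'stride' above,
              and any other rm is added to the base as a post-index offset.  */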
       
  3926     if (rm != 15) {
       
  3927         TCGv base;
       
  3928 
       
  3929         base = load_reg(s, rn);
       
  3930         if (rm == 13) {
       
  3931             tcg_gen_addi_i32(base, base, stride);
       
  3932         } else {
       
  3933             TCGv index;
       
  3934             index = load_reg(s, rm);
       
  3935             tcg_gen_add_i32(base, base, index);
       
  3936             dead_tmp(index);
       
  3937         }
       
  3938         store_reg(s, rn, base);
       
  3939     }
       
  3940     return 0;
       
  3941 }
       
  3942 
       
  3943 /* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
       
  3944 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
       
  3945 {
       
  3946     tcg_gen_and_i32(t, t, c);
       
  3947     tcg_gen_bic_i32(f, f, c);
       
  3948     tcg_gen_or_i32(dest, t, f);
       
  3949 }
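       /* The VBSL, VBIT and VBIF cases in disas_neon_data_insn below all reuse
          this helper; they differ only in which operand is passed as the
          select mask.  */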
       
  3950 
       
  3951 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
       
  3952 {
       
  3953     switch (size) {
       
  3954     case 0: gen_helper_neon_narrow_u8(dest, src); break;
       
  3955     case 1: gen_helper_neon_narrow_u16(dest, src); break;
       
  3956     case 2: tcg_gen_trunc_i64_i32(dest, src); break;
       
  3957     default: abort();
       
  3958     }
       
  3959 }
       
  3960 
       
  3961 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
       
  3962 {
       
  3963     switch (size) {
       
  3964     case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
       
  3965     case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
       
  3966     case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
       
  3967     default: abort();
       
  3968     }
       
  3969 }
       
  3970 
       
  3971 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
       
  3972 {
       
  3973     switch (size) {
       
  3974     case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
       
  3975     case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
       
  3976     case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
       
  3977     default: abort();
       
  3978     }
       
  3979 }
       
  3980 
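       /* Variable shift used by the shift-and-narrow path below
          (VSHRN/VRSHRN/VQSHRN/VQRSHRN): q selects the rounding variants and
          u the unsigned ones; right shifts arrive here as negative counts,
          which is how the variable-shift helpers are used throughout.  */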
       
  3981 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
       
  3982                                          int q, int u)
       
  3983 {
       
  3984     if (q) {
       
  3985         if (u) {
       
  3986             switch (size) {
       
  3987             case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
       
  3988             case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
       
  3989             default: abort();
       
  3990             }
       
  3991         } else {
       
  3992             switch (size) {
       
  3993             case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
       
  3994             case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
       
  3995             default: abort();
       
  3996             }
       
  3997         }
       
  3998     } else {
       
  3999         if (u) {
       
  4000             switch (size) {
       
  4001             case 1: gen_helper_neon_shl_u16(var, var, shift); break;
       
  4002             case 2: gen_helper_neon_shl_u32(var, var, shift); break;
       
  4003             default: abort();
       
  4004             }
       
  4005         } else {
       
  4006             switch (size) {
       
  4007             case 1: gen_helper_neon_shl_s16(var, var, shift); break;
       
  4008             case 2: gen_helper_neon_shl_s32(var, var, shift); break;
       
  4009             default: abort();
       
  4010             }
       
  4011         }
       
  4012     }
       
  4013 }
       
  4014 
       
  4015 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
       
  4016 {
       
  4017     if (u) {
       
  4018         switch (size) {
       
  4019         case 0: gen_helper_neon_widen_u8(dest, src); break;
       
  4020         case 1: gen_helper_neon_widen_u16(dest, src); break;
       
  4021         case 2: tcg_gen_extu_i32_i64(dest, src); break;
       
  4022         default: abort();
       
  4023         }
       
  4024     } else {
       
  4025         switch (size) {
       
  4026         case 0: gen_helper_neon_widen_s8(dest, src); break;
       
  4027         case 1: gen_helper_neon_widen_s16(dest, src); break;
       
  4028         case 2: tcg_gen_ext_i32_i64(dest, src); break;
       
  4029         default: abort();
       
  4030         }
       
  4031     }
       
  4032     dead_tmp(src);
       
  4033 }
       
  4034 
       
  4035 static inline void gen_neon_addl(int size)
       
  4036 {
       
  4037     switch (size) {
       
  4038     case 0: gen_helper_neon_addl_u16(CPU_V001); break;
       
  4039     case 1: gen_helper_neon_addl_u32(CPU_V001); break;
       
  4040     case 2: tcg_gen_add_i64(CPU_V001); break;
       
  4041     default: abort();
       
  4042     }
       
  4043 }
       
  4044 
       
  4045 static inline void gen_neon_subl(int size)
       
  4046 {
       
  4047     switch (size) {
       
  4048     case 0: gen_helper_neon_subl_u16(CPU_V001); break;
       
  4049     case 1: gen_helper_neon_subl_u32(CPU_V001); break;
       
  4050     case 2: tcg_gen_sub_i64(CPU_V001); break;
       
  4051     default: abort();
       
  4052     }
       
  4053 }
       
  4054 
       
  4055 static inline void gen_neon_negl(TCGv_i64 var, int size)
       
  4056 {
       
  4057     switch (size) {
       
  4058     case 0: gen_helper_neon_negl_u16(var, var); break;
       
  4059     case 1: gen_helper_neon_negl_u32(var, var); break;
       
  4060     case 2: gen_helper_neon_negl_u64(var, var); break;
       
  4061     default: abort();
       
  4062     }
       
  4063 }
       
  4064 
       
  4065 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
       
  4066 {
       
  4067     switch (size) {
       
  4068     case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
       
  4069     case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
       
  4070     default: abort();
       
  4071     }
       
  4072 }
       
  4073 
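       /* Widening multiply: (size << 1) | u selects the signed/unsigned 8- and
          16-bit widening helpers; 32-bit sources (size == 2) go through the
          generic 32x32->64 multiply paths, which take ownership of their
          operands, so the sources are only released here for size < 2.  */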
       
  4074 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
       
  4075 {
       
  4076     TCGv_i64 tmp;
       
  4077 
       
  4078     switch ((size << 1) | u) {
       
  4079     case 0: gen_helper_neon_mull_s8(dest, a, b); break;
       
  4080     case 1: gen_helper_neon_mull_u8(dest, a, b); break;
       
  4081     case 2: gen_helper_neon_mull_s16(dest, a, b); break;
       
  4082     case 3: gen_helper_neon_mull_u16(dest, a, b); break;
       
  4083     case 4:
       
  4084         tmp = gen_muls_i64_i32(a, b);
       
  4085         tcg_gen_mov_i64(dest, tmp);
       
  4086         break;
       
  4087     case 5:
       
  4088         tmp = gen_mulu_i64_i32(a, b);
       
  4089         tcg_gen_mov_i64(dest, tmp);
       
  4090         break;
       
  4091     default: abort();
       
  4092     }
       
  4093     if (size < 2) {
       
  4094         dead_tmp(b);
       
  4095         dead_tmp(a);
       
  4096     }
       
  4097 }
       
  4098 
       
  4099 /* Translate a NEON data processing instruction.  Return nonzero if the
       
  4100    instruction is invalid.
       
  4101    We process data in a mixture of 32-bit and 64-bit chunks.
       
  4102    Mostly we use 32-bit chunks so we can use normal scalar instructions.  */
       
  4103 
       
  4104 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
       
  4105 {
       
  4106     int op;
       
  4107     int q;
       
  4108     int rd, rn, rm;
       
  4109     int size;
       
  4110     int shift;
       
  4111     int pass;
       
  4112     int count;
       
  4113     int pairwise;
       
  4114     int u;
       
  4115     int n;
       
  4116     uint32_t imm;
       
  4117     TCGv tmp;
       
  4118     TCGv tmp2;
       
  4119     TCGv tmp3;
       
  4120     TCGv_i64 tmp64;
       
  4121 
       
  4122     if (!vfp_enabled(env))
       
  4123       return 1;
       
  4124     q = (insn & (1 << 6)) != 0;
       
  4125     u = (insn >> 24) & 1;
       
  4126     VFP_DREG_D(rd, insn);
       
  4127     VFP_DREG_N(rn, insn);
       
  4128     VFP_DREG_M(rm, insn);
       
  4129     size = (insn >> 20) & 3;
       
  4130     if ((insn & (1 << 23)) == 0) {
       
  4131         /* Three register same length.  */
       
  4132         op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
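           /* op combines insn bits [11:8] and bit 4 into a 5-bit opcode used
              by the switch statements below.  */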
       
  4133         if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
       
  4134                           || op == 10 || op == 11 || op == 16)) {
       
  4135             /* 64-bit element instructions.  */
       
  4136             for (pass = 0; pass < (q ? 2 : 1); pass++) {
       
  4137                 neon_load_reg64(cpu_V0, rn + pass);
       
  4138                 neon_load_reg64(cpu_V1, rm + pass);
       
  4139                 switch (op) {
       
  4140                 case 1: /* VQADD */
       
  4141                     if (u) {
       
  4142                         gen_helper_neon_add_saturate_u64(CPU_V001);
       
  4143                     } else {
       
  4144                         gen_helper_neon_add_saturate_s64(CPU_V001);
       
  4145                     }
       
  4146                     break;
       
  4147                 case 5: /* VQSUB */
       
  4148                     if (u) {
       
  4149                         gen_helper_neon_sub_saturate_u64(CPU_V001);
       
  4150                     } else {
       
  4151                         gen_helper_neon_sub_saturate_s64(CPU_V001);
       
  4152                     }
       
  4153                     break;
       
  4154                 case 8: /* VSHL */
       
  4155                     if (u) {
       
  4156                         gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
       
  4157                     } else {
       
  4158                         gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
       
  4159                     }
       
  4160                     break;
       
  4161                 case 9: /* VQSHL */
       
  4162                     if (u) {
       
  4163                         gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
       
  4164                                                  cpu_V1, cpu_V0);
       
  4165                     } else {
       
  4166                         gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
       
  4167                                                  cpu_V1, cpu_V0);
       
  4168                     }
       
  4169                     break;
       
  4170                 case 10: /* VRSHL */
       
  4171                     if (u) {
       
  4172                         gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
       
  4173                     } else {
       
  4174                         gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
       
  4175                     }
       
  4176                     break;
       
  4177                 case 11: /* VQRSHL */
       
  4178                     if (u) {
       
  4179                         gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
       
  4180                                                   cpu_V1, cpu_V0);
       
  4181                     } else {
       
  4182                         gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
       
  4183                                                   cpu_V1, cpu_V0);
       
  4184                     }
       
  4185                     break;
       
  4186                 case 16:
       
  4187                     if (u) {
       
  4188                         tcg_gen_sub_i64(CPU_V001);
       
  4189                     } else {
       
  4190                         tcg_gen_add_i64(CPU_V001);
       
  4191                     }
       
  4192                     break;
       
  4193                 default:
       
  4194                     abort();
       
  4195                 }
       
  4196                 neon_store_reg64(cpu_V0, rd + pass);
       
  4197             }
       
  4198             return 0;
       
  4199         }
       
  4200         switch (op) {
       
  4201         case 8: /* VSHL */
       
  4202         case 9: /* VQSHL */
       
  4203         case 10: /* VRSHL */
       
  4204         case 11: /* VQRSHL */
       
  4205             {
       
  4206                 int rtmp;
       
  4207                 /* Shift instruction operands are reversed.  */
       
  4208                 rtmp = rn;
       
  4209                 rn = rm;
       
  4210                 rm = rtmp;
       
  4211                 pairwise = 0;
       
  4212             }
       
  4213             break;
       
  4214         case 20: /* VPMAX */
       
  4215         case 21: /* VPMIN */
       
  4216         case 23: /* VPADD */
       
  4217             pairwise = 1;
       
  4218             break;
       
  4219         case 26: /* VPADD (float) */
       
  4220             pairwise = (u && size < 2);
       
  4221             break;
       
  4222         case 30: /* VPMIN/VPMAX (float) */
       
  4223             pairwise = u;
       
  4224             break;
       
  4225         default:
       
  4226             pairwise = 0;
       
  4227             break;
       
  4228         }
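           /* For pairwise ops both inputs of a pass come from the same source:
              rn supplies the low half of the result and rm the high half,
              which is what the pass < q + 1 test below selects.  */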
       
  4229         for (pass = 0; pass < (q ? 4 : 2); pass++) {
       
  4230 
       
  4231         if (pairwise) {
       
  4232             /* Pairwise.  */
       
  4233             if (q)
       
  4234                 n = (pass & 1) * 2;
       
  4235             else
       
  4236                 n = 0;
       
  4237             if (pass < q + 1) {
       
  4238                 NEON_GET_REG(T0, rn, n);
       
  4239                 NEON_GET_REG(T1, rn, n + 1);
       
  4240             } else {
       
  4241                 NEON_GET_REG(T0, rm, n);
       
  4242                 NEON_GET_REG(T1, rm, n + 1);
       
  4243             }
       
  4244         } else {
       
  4245             /* Elementwise.  */
       
  4246             NEON_GET_REG(T0, rn, pass);
       
  4247             NEON_GET_REG(T1, rm, pass);
       
  4248         }
       
  4249         switch (op) {
       
  4250         case 0: /* VHADD */
       
  4251             GEN_NEON_INTEGER_OP(hadd);
       
  4252             break;
       
  4253         case 1: /* VQADD */
       
  4254             GEN_NEON_INTEGER_OP_ENV(qadd);
       
  4255             break;
       
  4256         case 2: /* VRHADD */
       
  4257             GEN_NEON_INTEGER_OP(rhadd);
       
  4258             break;
       
  4259         case 3: /* Logic ops.  */
       
  4260             switch ((u << 2) | size) {
       
  4261             case 0: /* VAND */
       
  4262                 gen_op_andl_T0_T1();
       
  4263                 break;
       
  4264             case 1: /* BIC */
       
  4265                 gen_op_bicl_T0_T1();
       
  4266                 break;
       
  4267             case 2: /* VORR */
       
  4268                 gen_op_orl_T0_T1();
       
  4269                 break;
       
  4270             case 3: /* VORN */
       
  4271                 gen_op_notl_T1();
       
  4272                 gen_op_orl_T0_T1();
       
  4273                 break;
       
  4274             case 4: /* VEOR */
       
  4275                 gen_op_xorl_T0_T1();
       
  4276                 break;
       
  4277             case 5: /* VBSL */
       
  4278                 tmp = neon_load_reg(rd, pass);
       
  4279                 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
       
  4280                 dead_tmp(tmp);
       
  4281                 break;
       
  4282             case 6: /* VBIT */
       
  4283                 tmp = neon_load_reg(rd, pass);
       
  4284                 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
       
  4285                 dead_tmp(tmp);
       
  4286                 break;
       
  4287             case 7: /* VBIF */
       
  4288                 tmp = neon_load_reg(rd, pass);
       
  4289                 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
       
  4290                 dead_tmp(tmp);
       
  4291                 break;
       
  4292             }
       
  4293             break;
       
  4294         case 4: /* VHSUB */
       
  4295             GEN_NEON_INTEGER_OP(hsub);
       
  4296             break;
       
  4297         case 5: /* VQSUB */
       
  4298             GEN_NEON_INTEGER_OP_ENV(qsub);
       
  4299             break;
       
  4300         case 6: /* VCGT */
       
  4301             GEN_NEON_INTEGER_OP(cgt);
       
  4302             break;
       
  4303         case 7: /* VCGE */
       
  4304             GEN_NEON_INTEGER_OP(cge);
       
  4305             break;
       
  4306         case 8: /* VSHL */
       
  4307             GEN_NEON_INTEGER_OP(shl);
       
  4308             break;
       
  4309         case 9: /* VQSHL */
       
  4310             GEN_NEON_INTEGER_OP_ENV(qshl);
       
  4311             break;
       
  4312         case 10: /* VRSHL */
       
  4313             GEN_NEON_INTEGER_OP(rshl);
       
  4314             break;
       
  4315         case 11: /* VQRSHL */
       
  4316             GEN_NEON_INTEGER_OP_ENV(qrshl);
       
  4317             break;
       
  4318         case 12: /* VMAX */
       
  4319             GEN_NEON_INTEGER_OP(max);
       
  4320             break;
       
  4321         case 13: /* VMIN */
       
  4322             GEN_NEON_INTEGER_OP(min);
       
  4323             break;
       
  4324         case 14: /* VABD */
       
  4325             GEN_NEON_INTEGER_OP(abd);
       
  4326             break;
       
  4327         case 15: /* VABA */
       
  4328             GEN_NEON_INTEGER_OP(abd);
       
  4329             NEON_GET_REG(T1, rd, pass);
       
  4330             gen_neon_add(size);
       
  4331             break;
       
  4332         case 16:
       
  4333             if (!u) { /* VADD */
       
  4334                 if (gen_neon_add(size))
       
  4335                     return 1;
       
  4336             } else { /* VSUB */
       
  4337                 switch (size) {
       
  4338                 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
       
  4339                 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
       
  4340                 case 2: gen_op_subl_T0_T1(); break;
       
  4341                 default: return 1;
       
  4342                 }
       
  4343             }
       
  4344             break;
       
  4345         case 17:
       
  4346             if (!u) { /* VTST */
       
  4347                 switch (size) {
       
  4348                 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
       
  4349                 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
       
  4350                 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
       
  4351                 default: return 1;
       
  4352                 }
       
  4353             } else { /* VCEQ */
       
  4354                 switch (size) {
       
  4355                 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
       
  4356                 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
       
  4357                 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
       
  4358                 default: return 1;
       
  4359                 }
       
  4360             }
       
  4361             break;
       
  4362         case 18: /* Multiply.  */
       
  4363             switch (size) {
       
  4364             case 0: gen_helper_neon_mul_u8(CPU_T001); break;
       
  4365             case 1: gen_helper_neon_mul_u16(CPU_T001); break;
       
  4366             case 2: gen_op_mul_T0_T1(); break;
       
  4367             default: return 1;
       
  4368             }
       
  4369             NEON_GET_REG(T1, rd, pass);
       
  4370             if (u) { /* VMLS */
       
  4371                 gen_neon_rsb(size);
       
  4372             } else { /* VMLA */
       
  4373                 gen_neon_add(size);
       
  4374             }
       
  4375             break;
       
  4376         case 19: /* VMUL */
       
  4377             if (u) { /* polynomial */
       
  4378                 gen_helper_neon_mul_p8(CPU_T001);
       
  4379             } else { /* Integer */
       
  4380                 switch (size) {
       
  4381                 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
       
  4382                 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
       
  4383                 case 2: gen_op_mul_T0_T1(); break;
       
  4384                 default: return 1;
       
  4385                 }
       
  4386             }
       
  4387             break;
       
  4388         case 20: /* VPMAX */
       
  4389             GEN_NEON_INTEGER_OP(pmax);
       
  4390             break;
       
  4391         case 21: /* VPMIN */
       
  4392             GEN_NEON_INTEGER_OP(pmin);
       
  4393             break;
       
  4394         case 22: /* Multiply high.  */
       
  4395             if (!u) { /* VQDMULH */
       
  4396                 switch (size) {
       
  4397                 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
       
  4398                 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
       
  4399                 default: return 1;
       
  4400                 }
       
  4401             } else { /* VQRDMULH */
       
  4402                 switch (size) {
       
  4403                 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
       
  4404                 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
       
  4405                 default: return 1;
       
  4406                 }
       
  4407             }
       
  4408             break;
       
  4409         case 23: /* VPADD */
       
  4410             if (u)
       
  4411                 return 1;
       
  4412             switch (size) {
       
  4413             case 0: gen_helper_neon_padd_u8(CPU_T001); break;
       
  4414             case 1: gen_helper_neon_padd_u16(CPU_T001); break;
       
  4415             case 2: gen_op_addl_T0_T1(); break;
       
  4416             default: return 1;
       
  4417             }
       
  4418             break;
       
  4419         case 26: /* Floating point arithmetic.  */
       
  4420             switch ((u << 2) | size) {
       
  4421             case 0: /* VADD */
       
  4422                 gen_helper_neon_add_f32(CPU_T001);
       
  4423                 break;
       
  4424             case 2: /* VSUB */
       
  4425                 gen_helper_neon_sub_f32(CPU_T001);
       
  4426                 break;
       
  4427             case 4: /* VPADD */
       
  4428                 gen_helper_neon_add_f32(CPU_T001);
       
  4429                 break;
       
  4430             case 6: /* VABD */
       
  4431                 gen_helper_neon_abd_f32(CPU_T001);
       
  4432                 break;
       
  4433             default:
       
  4434                 return 1;
       
  4435             }
       
  4436             break;
       
  4437         case 27: /* Float multiply.  */
       
  4438             gen_helper_neon_mul_f32(CPU_T001);
       
  4439             if (!u) {
       
  4440                 NEON_GET_REG(T1, rd, pass);
       
  4441                 if (size == 0) {
       
  4442                     gen_helper_neon_add_f32(CPU_T001);
       
  4443                 } else {
       
  4444                     gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
       
  4445                 }
       
  4446             }
       
  4447             break;
       
  4448         case 28: /* Float compare.  */
       
  4449             if (!u) {
       
  4450                 gen_helper_neon_ceq_f32(CPU_T001);
       
  4451             } else {
       
  4452                 if (size == 0)
       
  4453                     gen_helper_neon_cge_f32(CPU_T001);
       
  4454                 else
       
  4455                     gen_helper_neon_cgt_f32(CPU_T001);
       
  4456             }
       
  4457             break;
       
  4458         case 29: /* Float compare absolute.  */
       
  4459             if (!u)
       
  4460                 return 1;
       
  4461             if (size == 0)
       
  4462                 gen_helper_neon_acge_f32(CPU_T001);
       
  4463             else
       
  4464                 gen_helper_neon_acgt_f32(CPU_T001);
       
  4465             break;
       
  4466         case 30: /* Float min/max.  */
       
  4467             if (size == 0)
       
  4468                 gen_helper_neon_max_f32(CPU_T001);
       
  4469             else
       
  4470                 gen_helper_neon_min_f32(CPU_T001);
       
  4471             break;
       
  4472         case 31:
       
  4473             if (size == 0)
       
  4474                 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
       
  4475             else
       
  4476                 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
       
  4477             break;
       
  4478         default:
       
  4479             abort();
       
  4480         }
       
  4481         /* Save the result.  For elementwise operations we can put it
       
  4482            straight into the destination register.  For pairwise operations
       
  4483            we have to be careful to avoid clobbering the source operands.  */
       
  4484         if (pairwise && rd == rm) {
       
  4485             gen_neon_movl_scratch_T0(pass);
       
  4486         } else {
       
  4487             NEON_SET_REG(T0, rd, pass);
       
  4488         }
       
  4489 
       
  4490         } /* for pass */
       
  4491         if (pairwise && rd == rm) {
       
  4492             for (pass = 0; pass < (q ? 4 : 2); pass++) {
       
  4493                 gen_neon_movl_T0_scratch(pass);
       
  4494                 NEON_SET_REG(T0, rd, pass);
       
  4495             }
       
  4496         }
       
  4497         /* End of 3 register same size operations.  */
       
  4498     } else if (insn & (1 << 4)) {
       
  4499         if ((insn & 0x00380080) != 0) {
       
  4500             /* Two registers and shift.  */
       
  4501             op = (insn >> 8) & 0xf;
       
  4502             if (insn & (1 << 7)) {
       
  4503                 /* 64-bit shift.   */
       
  4504                 size = 3;
       
  4505             } else {
       
  4506                 size = 2;
       
  4507                 while ((insn & (1 << (size + 19))) == 0)
       
  4508                     size--;
       
  4509             }
       
  4510             shift = (insn >> 16) & ((1 << (3 + size)) - 1);
       
  4511             /* To avoid excessive duplication of ops we implement shift
       
  4512                by immediate using the variable shift operations.  */
       
  4513             if (op < 8) {
       
  4514                 /* Shift by immediate:
       
  4515                    VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
       
  4516                 /* Right shifts are encoded as N - shift, where N is the
       
  4517                    element size in bits.  */
       
  4518                 if (op <= 4)
       
  4519                     shift = shift - (1 << (size + 3));
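                   /* e.g. for 8-bit elements (size == 0) an encoded value of 7
                      becomes 7 - 8 = -1, i.e. a right shift by one when passed
                      to the variable-shift helpers.  */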
       
  4520                 if (size == 3) {
       
  4521                     count = q + 1;
       
  4522                 } else {
       
  4523                     count = q ? 4: 2;
       
  4524                 }
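                   /* Replicate the (possibly negative) shift count into every
                      lane of a 32-bit word so the element-wise shift helpers
                      apply the same count to each element.  */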
       
  4525                 switch (size) {
       
  4526                 case 0:
       
  4527                     imm = (uint8_t) shift;
       
  4528                     imm |= imm << 8;
       
  4529                     imm |= imm << 16;
       
  4530                     break;
       
  4531                 case 1:
       
  4532                     imm = (uint16_t) shift;
       
  4533                     imm |= imm << 16;
       
  4534                     break;
       
  4535                 case 2:
       
  4536                 case 3:
       
  4537                     imm = shift;
       
  4538                     break;
       
  4539                 default:
       
  4540                     abort();
       
  4541                 }
       
  4542 
       
  4543                 for (pass = 0; pass < count; pass++) {
       
  4544                     if (size == 3) {
       
  4545                         neon_load_reg64(cpu_V0, rm + pass);
       
  4546                         tcg_gen_movi_i64(cpu_V1, imm);
       
  4547                         switch (op) {
       
  4548                         case 0:  /* VSHR */
       
  4549                         case 1:  /* VSRA */
       
  4550                             if (u)
       
  4551                                 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
       
  4552                             else
       
  4553                                 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
       
  4554                             break;
       
  4555                         case 2: /* VRSHR */
       
  4556                         case 3: /* VRSRA */
       
  4557                             if (u)
       
  4558                                 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
       
  4559                             else
       
  4560                                 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
       
  4561                             break;
       
  4562                         case 4: /* VSRI */
       
  4563                             if (!u)
       
  4564                                 return 1;
       
  4565                             gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
       
  4566                             break;
       
  4567                         case 5: /* VSHL, VSLI */
       
  4568                             gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
       
  4569                             break;
       
  4570                         case 6: /* VQSHL */
       
  4571                             if (u)
       
  4572                                 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
       
  4573                             else
       
  4574                                 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
       
  4575                             break;
       
  4576                         case 7: /* VQSHLU */
       
  4577                             gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
       
  4578                             break;
       
  4579                         }
       
  4580                         if (op == 1 || op == 3) {
       
  4581                             /* Accumulate.  */
       
  4582                             neon_load_reg64(cpu_V0, rd + pass);
       
  4583                             tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
       
  4584                         } else if (op == 4 || (op == 5 && u)) {
       
  4585                             /* Insert */
       
  4586                             cpu_abort(env, "VS[LR]I.64 not implemented");
       
  4587                         }
       
  4588                         neon_store_reg64(cpu_V0, rd + pass);
       
  4589                     } else { /* size < 3 */
       
  4590                         /* Operands in T0 and T1.  */
       
  4591                         gen_op_movl_T1_im(imm);
       
  4592                         NEON_GET_REG(T0, rm, pass);
       
  4593                         switch (op) {
       
  4594                         case 0:  /* VSHR */
       
  4595                         case 1:  /* VSRA */
       
  4596                             GEN_NEON_INTEGER_OP(shl);
       
  4597                             break;
       
  4598                         case 2: /* VRSHR */
       
  4599                         case 3: /* VRSRA */
       
  4600                             GEN_NEON_INTEGER_OP(rshl);
       
  4601                             break;
       
  4602                         case 4: /* VSRI */
       
  4603                             if (!u)
       
  4604                                 return 1;
       
  4605                             GEN_NEON_INTEGER_OP(shl);
       
  4606                             break;
       
  4607                         case 5: /* VSHL, VSLI */
       
  4608                             switch (size) {
       
  4609                             case 0: gen_helper_neon_shl_u8(CPU_T001); break;
       
  4610                             case 1: gen_helper_neon_shl_u16(CPU_T001); break;
       
  4611                             case 2: gen_helper_neon_shl_u32(CPU_T001); break;
       
  4612                             default: return 1;
       
  4613                             }
       
  4614                             break;
       
  4615                         case 6: /* VQSHL */
       
  4616                             GEN_NEON_INTEGER_OP_ENV(qshl);
       
  4617                             break;
       
  4618                         case 7: /* VQSHLU */
       
  4619                             switch (size) {
       
  4620                             case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
       
  4621                             case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
       
  4622                             case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
       
  4623                             default: return 1;
       
  4624                             }
       
  4625                             break;
       
  4626                         }
       
  4627 
       
  4628                         if (op == 1 || op == 3) {
       
  4629                             /* Accumulate.  */
       
  4630                             NEON_GET_REG(T1, rd, pass);
       
  4631                             gen_neon_add(size);
       
  4632                         } else if (op == 4 || (op == 5 && u)) {
       
  4633                             /* Insert */
       
  4634                             switch (size) {
       
  4635                             case 0:
       
  4636                                 if (op == 4)
       
  4637                                     imm = 0xff >> -shift;
       
  4638                                 else
       
  4639                                     imm = (uint8_t)(0xff << shift);
       
  4640                                 imm |= imm << 8;
       
  4641                                 imm |= imm << 16;
       
  4642                                 break;
       
  4643                             case 1:
       
  4644                                 if (op == 4)
       
  4645                                     imm = 0xffff >> -shift;
       
  4646                                 else
       
  4647                                     imm = (uint16_t)(0xffff << shift);
       
  4648                                 imm |= imm << 16;
       
  4649                                 break;
       
  4650                             case 2:
       
  4651                                 if (op == 4)
       
  4652                                     imm = 0xffffffffu >> -shift;
       
  4653                                 else
       
  4654                                     imm = 0xffffffffu << shift;
       
  4655                                 break;
       
  4656                             default:
       
  4657                                 abort();
       
  4658                             }
       
  4659                             tmp = neon_load_reg(rd, pass);
       
  4660                             tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
       
  4661                             tcg_gen_andi_i32(tmp, tmp, ~imm);
       
  4662                             tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
       
  4663                         }
       
  4664                         NEON_SET_REG(T0, rd, pass);
       
  4665                     }
       
  4666                 } /* for pass */
       
  4667             } else if (op < 10) {
       
  4668                 /* Shift by immediate and narrow:
       
  4669                    VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
       
  4670                 shift = shift - (1 << (size + 3));
       
  4671                 size++;
       
  4672                 switch (size) {
       
  4673                 case 1:
       
  4674                     imm = (uint16_t)shift;
       
  4675                     imm |= imm << 16;
       
  4676                     tmp2 = tcg_const_i32(imm);
       
  4677                     TCGV_UNUSED_I64(tmp64);
       
  4678                     break;
       
  4679                 case 2:
       
  4680                     imm = (uint32_t)shift;
       
  4681                     tmp2 = tcg_const_i32(imm);
       
  4682                     TCGV_UNUSED_I64(tmp64);
       
  4683                     break;
       
  4684                 case 3:
       
  4685                     tmp64 = tcg_const_i64(shift);
       
  4686                     TCGV_UNUSED(tmp2);
       
  4687                     break;
       
  4688                 default:
       
  4689                     abort();
       
  4690                 }
       
  4691 
       
  4692                 for (pass = 0; pass < 2; pass++) {
       
  4693                     if (size == 3) {
       
  4694                         neon_load_reg64(cpu_V0, rm + pass);
       
  4695                         if (q) {
       
  4696                           if (u)
       
  4697                             gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
       
  4698                           else
       
  4699                             gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
       
  4700                         } else {
       
  4701                           if (u)
       
  4702                             gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
       
  4703                           else
       
  4704                             gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
       
  4705                         }
       
  4706                     } else {
       
  4707                         tmp = neon_load_reg(rm + pass, 0);
       
  4708                         gen_neon_shift_narrow(size, tmp, tmp2, q, u);
       
  4709                         tmp3 = neon_load_reg(rm + pass, 1);
       
  4710                         gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
       
  4711                         tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
       
  4712                         dead_tmp(tmp);
       
  4713                         dead_tmp(tmp3);
       
  4714                     }
       
  4715                     tmp = new_tmp();
       
  4716                     if (op == 8 && !u) {
       
  4717                         gen_neon_narrow(size - 1, tmp, cpu_V0);
       
  4718                     } else {
       
  4719                         if (op == 8)
       
  4720                             gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
       
  4721                         else
       
  4722                             gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
       
  4723                     }
       
  4724                     if (pass == 0) {
       
  4725                         tmp2 = tmp;
       
  4726                     } else {
       
  4727                         neon_store_reg(rd, 0, tmp2);
       
  4728                         neon_store_reg(rd, 1, tmp);
       
  4729                     }
       
  4730                 } /* for pass */
       
  4731             } else if (op == 10) {
       
  4732                 /* VSHLL */
       
  4733                 if (q || size == 3)
       
  4734                     return 1;
       
  4735                 tmp = neon_load_reg(rm, 0);
       
  4736                 tmp2 = neon_load_reg(rm, 1);
       
  4737                 for (pass = 0; pass < 2; pass++) {
       
  4738                     if (pass == 1)
       
  4739                         tmp = tmp2;
       
  4740 
       
  4741                     gen_neon_widen(cpu_V0, tmp, size, u);
       
  4742 
       
  4743                     if (shift != 0) {
       
  4744                         /* The shift is less than the width of the source
       
  4745                            type, so we can just shift the whole register.  */
       
  4746                         tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
       
  4747                         if (size < 2 || !u) {
       
  4748                             uint64_t imm64;
       
  4749                             if (size == 0) {
       
  4750                                 imm = (0xffu >> (8 - shift));
       
  4751                                 imm |= imm << 16;
       
  4752                             } else {
       
  4753                                 imm = 0xffff >> (16 - shift);
       
  4754                             }
       
  4755                             imm64 = imm | (((uint64_t)imm) << 32);
       
  4756                             tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
       
  4757                         }
       
  4758                     }
       
  4759                     neon_store_reg64(cpu_V0, rd + pass);
       
  4760                 }
       
  4761             } else if (op == 15 || op == 16) {
       
  4762                 /* VCVT fixed-point.  */
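                   /* Bit 0 of op selects the direction: op == 15 converts
                      fixed-point to float (gen_vfp_ulto/slto), op == 16 float
                      to fixed-point (gen_vfp_toul/tosl); u selects the
                      unsigned variants.  */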
       
  4763                 for (pass = 0; pass < (q ? 4 : 2); pass++) {
       
  4764                     tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
       
  4765                     if (op & 1) {
       
  4766                         if (u)
       
  4767                             gen_vfp_ulto(0, shift);
       
  4768                         else
       
  4769                             gen_vfp_slto(0, shift);
       
  4770                     } else {
       
  4771                         if (u)
       
  4772                             gen_vfp_toul(0, shift);
       
  4773                         else
       
  4774                             gen_vfp_tosl(0, shift);
       
  4775                     }
       
  4776                     tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
       
  4777                 }
       
  4778             } else {
       
  4779                 return 1;
       
  4780             }
       
  4781         } else { /* (insn & 0x00380080) == 0 */
       
  4782             int invert;
       
  4783 
       
  4784             op = (insn >> 8) & 0xf;
       
  4785             /* One register and immediate.  */
       
  4786             imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
       
  4787             invert = (insn & (1 << 5)) != 0;
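               /* Expand the 8-bit immediate according to op (the cmode field):
                  shifted into one byte of each word, replicated across
                  halfwords, ones-extended, expanded bitwise to a byte mask
                  (op 14 with invert) or converted to a float immediate
                  (op 15); invert covers the VMVN/VBIC-style encodings.  */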
       
  4788             switch (op) {
       
  4789             case 0: case 1:
       
  4790                 /* no-op */
       
  4791                 break;
       
  4792             case 2: case 3:
       
  4793                 imm <<= 8;
       
  4794                 break;
       
  4795             case 4: case 5:
       
  4796                 imm <<= 16;
       
  4797                 break;
       
  4798             case 6: case 7:
       
  4799                 imm <<= 24;
       
  4800                 break;
       
  4801             case 8: case 9:
       
  4802                 imm |= imm << 16;
       
  4803                 break;
       
  4804             case 10: case 11:
       
  4805                 imm = (imm << 8) | (imm << 24);
       
  4806                 break;
       
  4807             case 12:
       
  4808                 imm = (imm << 8) | 0xff;
       
  4809                 break;
       
  4810             case 13:
       
  4811                 imm = (imm << 16) | 0xffff;
       
  4812                 break;
       
  4813             case 14:
       
  4814                 imm |= (imm << 8) | (imm << 16) | (imm << 24);
       
  4815                 if (invert)
       
  4816                     imm = ~imm;
       
  4817                 break;
       
  4818             case 15:
       
  4819                 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
       
  4820                       | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
       
  4821                 break;
       
  4822             }
       
  4823             if (invert)
       
  4824                 imm = ~imm;
       
  4825 
       
  4826             if (op != 14 || !invert)
       
  4827                 gen_op_movl_T1_im(imm);
       
  4828 
       
  4829             for (pass = 0; pass < (q ? 4 : 2); pass++) {
       
  4830                 if (op & 1 && op < 12) {
       
  4831                     tmp = neon_load_reg(rd, pass);
       
  4832                     if (invert) {
       
  4833                         /* The immediate value has already been inverted, so
       
  4834                            BIC becomes AND.  */
       
  4835                         tcg_gen_andi_i32(tmp, tmp, imm);
       
  4836                     } else {
       
  4837                         tcg_gen_ori_i32(tmp, tmp, imm);
       
  4838                     }
       
  4839                 } else {
       
  4840                     /* VMOV, VMVN.  */
       
  4841                     tmp = new_tmp();
       
  4842                     if (op == 14 && invert) {
       
  4843                         uint32_t val;
       
  4844                         val = 0;
       
  4845                         for (n = 0; n < 4; n++) {
       
  4846                             if (imm & (1 << (n + (pass & 1) * 4)))
       
  4847                                 val |= 0xff << (n * 8);
       
  4848                         }
       
  4849                         tcg_gen_movi_i32(tmp, val);
       
  4850                     } else {
       
  4851                         tcg_gen_movi_i32(tmp, imm);
       
  4852                     }
       
  4853                 }
       
  4854                 neon_store_reg(rd, pass, tmp);
       
  4855             }
       
  4856         }
       
  4857     } else { /* (insn & 0x00800010) == 0x00800000 */
       
  4858         if (size != 3) {
       
  4859             op = (insn >> 8) & 0xf;
       
  4860             if ((insn & (1 << 6)) == 0) {
       
  4861                 /* Three registers of different lengths.  */
       
  4862                 int src1_wide;
       
  4863                 int src2_wide;
       
  4864                 int prewiden;
       
  4865                 /* prewiden, src1_wide, src2_wide */
       
  4866                 static const int neon_3reg_wide[16][3] = {
       
  4867                     {1, 0, 0}, /* VADDL */
       
  4868                     {1, 1, 0}, /* VADDW */
       
  4869                     {1, 0, 0}, /* VSUBL */
       
  4870                     {1, 1, 0}, /* VSUBW */
       
  4871                     {0, 1, 1}, /* VADDHN */
       
  4872                     {0, 0, 0}, /* VABAL */
       
  4873                     {0, 1, 1}, /* VSUBHN */
       
  4874                     {0, 0, 0}, /* VABDL */
       
  4875                     {0, 0, 0}, /* VMLAL */
       
  4876                     {0, 0, 0}, /* VQDMLAL */
       
  4877                     {0, 0, 0}, /* VMLSL */
       
  4878                     {0, 0, 0}, /* VQDMLSL */
       
  4879                     {0, 0, 0}, /* Integer VMULL */
       
  4880                     {0, 0, 0}, /* VQDMULL */
       
  4881                     {0, 0, 0}  /* Polynomial VMULL */
       
  4882                 };
       
  4883 
       
  4884                 prewiden = neon_3reg_wide[op][0];
       
  4885                 src1_wide = neon_3reg_wide[op][1];
       
  4886                 src2_wide = neon_3reg_wide[op][2];
       
  4887 
       
  4888                 if (size == 0 && (op == 9 || op == 11 || op == 13))
       
  4889                     return 1;
       
  4890 
       
  4891                 /* Avoid overlapping operands.  Wide source operands are
       
  4892                    always aligned so will never overlap with wide
       
  4893                    destinations in problematic ways.  */
       
  4894                 if (rd == rm && !src2_wide) {
       
  4895                     NEON_GET_REG(T0, rm, 1);
       
  4896                     gen_neon_movl_scratch_T0(2);
       
  4897                 } else if (rd == rn && !src1_wide) {
       
  4898                     NEON_GET_REG(T0, rn, 1);
       
  4899                     gen_neon_movl_scratch_T0(2);
       
  4900                 }
       
  4901                 TCGV_UNUSED(tmp3);
       
  4902                 for (pass = 0; pass < 2; pass++) {
       
  4903                     if (src1_wide) {
       
  4904                         neon_load_reg64(cpu_V0, rn + pass);
       
  4905                         TCGV_UNUSED(tmp);
       
  4906                     } else {
       
  4907                         if (pass == 1 && rd == rn) {
       
  4908                             gen_neon_movl_T0_scratch(2);
       
  4909                             tmp = new_tmp();
       
  4910                             tcg_gen_mov_i32(tmp, cpu_T[0]);
       
  4911                         } else {
       
  4912                             tmp = neon_load_reg(rn, pass);
       
  4913                         }
       
  4914                         if (prewiden) {
       
  4915                             gen_neon_widen(cpu_V0, tmp, size, u);
       
  4916                         }
       
  4917                     }
       
  4918                     if (src2_wide) {
       
  4919                         neon_load_reg64(cpu_V1, rm + pass);
       
  4920                         TCGV_UNUSED(tmp2);
       
  4921                     } else {
       
  4922                         if (pass == 1 && rd == rm) {
       
  4923                             gen_neon_movl_T0_scratch(2);
       
  4924                             tmp2 = new_tmp();
       
  4925                             tcg_gen_mov_i32(tmp2, cpu_T[0]);
       
  4926                         } else {
       
  4927                             tmp2 = neon_load_reg(rm, pass);
       
  4928                         }
       
  4929                         if (prewiden) {
       
  4930                             gen_neon_widen(cpu_V1, tmp2, size, u);
       
  4931                         }
       
  4932                     }
       
  4933                     switch (op) {
       
  4934                     case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
       
  4935                         gen_neon_addl(size);
       
  4936                         break;
       
  4937                     case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
       
  4938                         gen_neon_subl(size);
       
  4939                         break;
       
  4940                     case 5: case 7: /* VABAL, VABDL */
       
  4941                         switch ((size << 1) | u) {
       
  4942                         case 0:
       
  4943                             gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
       
  4944                             break;
       
  4945                         case 1:
       
  4946                             gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
       
  4947                             break;
       
  4948                         case 2:
       
  4949                             gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
       
  4950                             break;
       
  4951                         case 3:
       
  4952                             gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
       
  4953                             break;
       
  4954                         case 4:
       
  4955                             gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
       
  4956                             break;
       
  4957                         case 5:
       
  4958                             gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
       
  4959                             break;
       
  4960                         default: abort();
       
  4961                         }
       
  4962                         dead_tmp(tmp2);
       
  4963                         dead_tmp(tmp);
       
  4964                         break;
       
  4965                     case 8: case 9: case 10: case 11: case 12: case 13:
       
  4966                         /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
       
  4967                         gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
       
  4968                         break;
       
  4969                     case 14: /* Polynomial VMULL */
       
  4970                         cpu_abort(env, "Polynomial VMULL not implemented");
       
  4971 
       
  4972                     default: /* 15 is RESERVED.  */
       
  4973                         return 1;
       
  4974                     }
       
  4975                     if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
       
  4976                         /* Accumulate.  */
       
  4977                         if (op == 10 || op == 11) {
       
  4978                             gen_neon_negl(cpu_V0, size);
       
  4979                         }
       
  4980 
       
  4981                         if (op != 13) {
       
  4982                             neon_load_reg64(cpu_V1, rd + pass);
       
  4983                         }
       
  4984 
       
  4985                         switch (op) {
       
  4986                         case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
       
  4987                             gen_neon_addl(size);
       
  4988                             break;
       
  4989                         case 9: case 11: /* VQDMLAL, VQDMLSL */
       
  4990                             gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
       
  4991                             gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
       
  4992                             break;
       
  4993 
       
  4994                         case 13: /* VQDMULL */
       
  4995                             gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
       
  4996                             break;
       
  4997                         default:
       
  4998                             abort();
       
  4999                         }
       
  5000                         neon_store_reg64(cpu_V0, rd + pass);
       
  5001                     } else if (op == 4 || op == 6) {
       
  5002                         /* Narrowing operation.  */
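                         /* VADDHN/VSUBHN style: keep only the high half of
                            each double-width result, with or without rounding
                            depending on the u bit.  */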
       
  5003                         tmp = new_tmp();
       
  5004                         if (u) {
       
  5005                             switch (size) {
       
  5006                             case 0:
       
  5007                                 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
       
  5008                                 break;
       
  5009                             case 1:
       
  5010                                 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
       
  5011                                 break;
       
  5012                             case 2:
       
  5013                                 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
       
  5014                                 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
       
  5015                                 break;
       
  5016                             default: abort();
       
  5017                             }
       
  5018                         } else {
       
  5019                             switch (size) {
       
  5020                             case 0:
       
  5021                                 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
       
  5022                                 break;
       
  5023                             case 1:
       
  5024                                 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
       
  5025                                 break;
       
  5026                             case 2:
       
  5027                                 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
       
  5028                                 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
       
  5029                                 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
       
  5030                                 break;
       
  5031                             default: abort();
       
  5032                             }
       
  5033                         }
       
  5034                         if (pass == 0) {
       
  5035                             tmp3 = tmp;
       
  5036                         } else {
       
  5037                             neon_store_reg(rd, 0, tmp3);
       
  5038                             neon_store_reg(rd, 1, tmp);
       
  5039                         }
       
  5040                     } else {
       
  5041                         /* Write back the result.  */
       
  5042                         neon_store_reg64(cpu_V0, rd + pass);
       
  5043                     }
       
  5044                 }
       
  5045             } else {
       
  5046                 /* Two registers and a scalar.  */
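                 /* "By element" forms: one multiplicand is a single scalar
                    element of Dm, duplicated across the vector.
                    gen_neon_get_scalar() leaves that value in T0, which is
                    parked in a scratch slot and restored at each pass.  */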
       
  5047                 switch (op) {
       
  5048                 case 0: /* Integer VMLA scalar */
       
  5049                 case 1: /* Float VMLA scalar */
       
  5050                 case 4: /* Integer VMLS scalar */
       
  5051                 case 5: /* Floating point VMLS scalar */
       
  5052                 case 8: /* Integer VMUL scalar */
       
  5053                 case 9: /* Floating point VMUL scalar */
       
  5054                 case 12: /* VQDMULH scalar */
       
  5055                 case 13: /* VQRDMULH scalar */
       
  5056                     gen_neon_get_scalar(size, rm);
       
  5057                     gen_neon_movl_scratch_T0(0);
       
  5058                     for (pass = 0; pass < (u ? 4 : 2); pass++) {
       
  5059                         if (pass != 0)
       
  5060                             gen_neon_movl_T0_scratch(0);
       
  5061                         NEON_GET_REG(T1, rn, pass);
       
  5062                         if (op == 12) {
       
  5063                             if (size == 1) {
       
  5064                                 gen_helper_neon_qdmulh_s16(CPU_T0E01);
       
  5065                             } else {
       
  5066                                 gen_helper_neon_qdmulh_s32(CPU_T0E01);
       
  5067                             }
       
  5068                         } else if (op == 13) {
       
  5069                             if (size == 1) {
       
  5070                                 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
       
  5071                             } else {
       
  5072                                 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
       
  5073                             }
       
  5074                         } else if (op & 1) {
       
  5075                             gen_helper_neon_mul_f32(CPU_T001);
       
  5076                         } else {
       
  5077                             switch (size) {
       
  5078                             case 0: gen_helper_neon_mul_u8(CPU_T001); break;
       
  5079                             case 1: gen_helper_neon_mul_u16(CPU_T001); break;
       
  5080                             case 2: gen_op_mul_T0_T1(); break;
       
  5081                             default: return 1;
       
  5082                             }
       
  5083                         }
       
  5084                         if (op < 8) {
       
  5085                             /* Accumulate.  */
       
  5086                             NEON_GET_REG(T1, rd, pass);
       
  5087                             switch (op) {
       
  5088                             case 0:
       
  5089                                 gen_neon_add(size);
       
  5090                                 break;
       
  5091                             case 1:
       
  5092                                 gen_helper_neon_add_f32(CPU_T001);
       
  5093                                 break;
       
  5094                             case 4:
       
  5095                                 gen_neon_rsb(size);
       
  5096                                 break;
       
  5097                             case 5:
       
  5098                                 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
       
  5099                                 break;
       
  5100                             default:
       
  5101                                 abort();
       
  5102                             }
       
  5103                         }
       
  5104                         NEON_SET_REG(T0, rd, pass);
       
  5105                     }
       
  5106                     break;
       
  5107                 case 2: /* VMLAL scalar */
       
  5108                 case 3: /* VQDMLAL scalar */
       
  5109                 case 6: /* VMLSL scalar */
       
  5110                 case 7: /* VQDMLSL scalar */
       
  5111                 case 10: /* VMULL scalar */
       
  5112                 case 11: /* VQDMULL scalar */
       
  5113                     if (size == 0 && (op == 3 || op == 7 || op == 11))
       
  5114                         return 1;
       
  5115 
       
  5116                     gen_neon_get_scalar(size, rm);
       
  5117                     NEON_GET_REG(T1, rn, 1);
       
  5118 
       
  5119                     for (pass = 0; pass < 2; pass++) {
       
  5120                         if (pass == 0) {
       
  5121                             tmp = neon_load_reg(rn, 0);
       
  5122                         } else {
       
  5123                             tmp = new_tmp();
       
  5124                             tcg_gen_mov_i32(tmp, cpu_T[1]);
       
  5125                         }
       
  5126                         tmp2 = new_tmp();
       
  5127                         tcg_gen_mov_i32(tmp2, cpu_T[0]);
       
  5128                         gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
       
  5129                         if (op == 6 || op == 7) {
       
  5130                             gen_neon_negl(cpu_V0, size);
       
  5131                         }
       
  5132                         if (op != 11) {
       
  5133                             neon_load_reg64(cpu_V1, rd + pass);
       
  5134                         }
       
  5135                         switch (op) {
       
  5136                         case 2: case 6:
       
  5137                             gen_neon_addl(size);
       
  5138                             break;
       
  5139                         case 3: case 7:
       
  5140                             gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
       
  5141                             gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
       
  5142                             break;
       
  5143                         case 10:
       
  5144                             /* no-op */
       
  5145                             break;
       
  5146                         case 11:
       
  5147                             gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
       
  5148                             break;
       
  5149                         default:
       
  5150                             abort();
       
  5151                         }
       
  5152                         neon_store_reg64(cpu_V0, rd + pass);
       
  5153                     }
       
  5154                     break;
       
  5155                 default: /* 14 and 15 are RESERVED */
       
  5156                     return 1;
       
  5157                 }
       
  5158             }
       
  5159         } else { /* size == 3 */
       
  5160             if (!u) {
       
  5161                 /* Extract.  */
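                 /* VEXT: treat <Vm:Vn> as one byte string and extract a
                    register's worth of bytes starting at byte index imm
                    (Vn supplies the low-order bytes).  */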
       
  5162                 imm = (insn >> 8) & 0xf;
       
  5163                 count = q + 1;
       
  5164 
       
  5165                 if (imm > 7 && !q)
       
  5166                     return 1;
       
  5167 
       
  5168                 if (imm == 0) {
       
  5169                     neon_load_reg64(cpu_V0, rn);
       
  5170                     if (q) {
       
  5171                         neon_load_reg64(cpu_V1, rn + 1);
       
  5172                     }
       
  5173                 } else if (imm == 8) {
       
  5174                     neon_load_reg64(cpu_V0, rn + 1);
       
  5175                     if (q) {
       
  5176                         neon_load_reg64(cpu_V1, rm);
       
  5177                     }
       
  5178                 } else if (q) {
       
  5179                     tmp64 = tcg_temp_new_i64();
       
  5180                     if (imm < 8) {
       
  5181                         neon_load_reg64(cpu_V0, rn);
       
  5182                         neon_load_reg64(tmp64, rn + 1);
       
  5183                     } else {
       
  5184                         neon_load_reg64(cpu_V0, rn + 1);
       
  5185                         neon_load_reg64(tmp64, rm);
       
  5186                     }
       
  5187                     tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
       
  5188                     tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
       
  5189                     tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
       
  5190                     if (imm < 8) {
       
  5191                         neon_load_reg64(cpu_V1, rm);
       
  5192                     } else {
       
  5193                         neon_load_reg64(cpu_V1, rm + 1);
       
  5194                         imm -= 8;
       
  5195                     }
       
  5196                     tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
       
  5197                     tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
       
  5198                     tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
       
  5199                 } else {
       
  5200                     /* BUGFIX */
       
  5201                     neon_load_reg64(cpu_V0, rn);
       
  5202                     tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
       
  5203                     neon_load_reg64(cpu_V1, rm);
       
  5204                     tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
       
  5205                     tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
       
  5206                 }
       
  5207                 neon_store_reg64(cpu_V0, rd);
       
  5208                 if (q) {
       
  5209                     neon_store_reg64(cpu_V1, rd + 1);
       
  5210                 }
       
  5211             } else if ((insn & (1 << 11)) == 0) {
       
  5212                 /* Two register misc.  */
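                 /* op packs insn bits [17:16] and [10:7] into a 6-bit
                    opcode; size comes from bits [19:18].  */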
       
  5213                 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
       
  5214                 size = (insn >> 18) & 3;
       
  5215                 switch (op) {
       
  5216                 case 0: /* VREV64 */
       
  5217                     if (size == 3)
       
  5218                         return 1;
       
  5219                     for (pass = 0; pass < (q ? 2 : 1); pass++) {
       
  5220                         NEON_GET_REG(T0, rm, pass * 2);
       
  5221                         NEON_GET_REG(T1, rm, pass * 2 + 1);
       
  5222                         switch (size) {
       
  5223                         case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
       
  5224                         case 1: gen_swap_half(cpu_T[0]); break;
       
  5225                         case 2: /* no-op */ break;
       
  5226                         default: abort();
       
  5227                         }
       
  5228                         NEON_SET_REG(T0, rd, pass * 2 + 1);
       
  5229                         if (size == 2) {
       
  5230                             NEON_SET_REG(T1, rd, pass * 2);
       
  5231                         } else {
       
  5232                             gen_op_movl_T0_T1();
       
  5233                             switch (size) {
       
  5234                             case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
       
  5235                             case 1: gen_swap_half(cpu_T[0]); break;
       
  5236                             default: abort();
       
  5237                             }
       
  5238                             NEON_SET_REG(T0, rd, pass * 2);
       
  5239                         }
       
  5240                     }
       
  5241                     break;
       
  5242                 case 4: case 5: /* VPADDL */
       
  5243                 case 12: case 13: /* VPADAL */
       
  5244                     if (size == 3)
       
  5245                         return 1;
       
  5246                     for (pass = 0; pass < q + 1; pass++) {
       
  5247                         tmp = neon_load_reg(rm, pass * 2);
       
  5248                         gen_neon_widen(cpu_V0, tmp, size, op & 1);
       
  5249                         tmp = neon_load_reg(rm, pass * 2 + 1);
       
  5250                         gen_neon_widen(cpu_V1, tmp, size, op & 1);
       
  5251                         switch (size) {
       
  5252                         case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
       
  5253                         case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
       
  5254                         case 2: tcg_gen_add_i64(CPU_V001); break;
       
  5255                         default: abort();
       
  5256                         }
       
  5257                         if (op >= 12) {
       
  5258                             /* Accumulate.  */
       
  5259                             neon_load_reg64(cpu_V1, rd + pass);
       
  5260                             gen_neon_addl(size);
       
  5261                         }
       
  5262                         neon_store_reg64(cpu_V0, rd + pass);
       
  5263                     }
       
  5264                     break;
       
  5265                 case 33: /* VTRN */
       
  5266                     if (size == 2) {
       
  5267                         for (n = 0; n < (q ? 4 : 2); n += 2) {
       
  5268                             NEON_GET_REG(T0, rm, n);
       
  5269                             NEON_GET_REG(T1, rd, n + 1);
       
  5270                             NEON_SET_REG(T1, rm, n);
       
  5271                             NEON_SET_REG(T0, rd, n + 1);
       
  5272                         }
       
  5273                     } else {
       
  5274                         goto elementwise;
       
  5275                     }
       
  5276                     break;
       
  5277                 case 34: /* VUZP */
       
  5278                     /* Reg  Before       After
       
  5279                        Rd   A3 A2 A1 A0  B2 B0 A2 A0
       
  5280                        Rm   B3 B2 B1 B0  B3 B1 A3 A1
       
  5281                      */
       
  5282                     if (size == 3)
       
  5283                         return 1;
       
  5284                     gen_neon_unzip(rd, q, 0, size);
       
  5285                     gen_neon_unzip(rm, q, 4, size);
       
  5286                     if (q) {
       
  5287                         static int unzip_order_q[8] =
       
  5288                             {0, 2, 4, 6, 1, 3, 5, 7};
       
  5289                         for (n = 0; n < 8; n++) {
       
  5290                             int reg = (n < 4) ? rd : rm;
       
  5291                             gen_neon_movl_T0_scratch(unzip_order_q[n]);
       
  5292                             NEON_SET_REG(T0, reg, n % 4);
       
  5293                         }
       
  5294                     } else {
       
  5295                         static int unzip_order[4] =
       
  5296                             {0, 4, 1, 5};
       
  5297                         for (n = 0; n < 4; n++) {
       
  5298                             int reg = (n < 2) ? rd : rm;
       
  5299                             gen_neon_movl_T0_scratch(unzip_order[n]);
       
  5300                             NEON_SET_REG(T0, reg, n % 2);
       
  5301                         }
       
  5302                     }
       
  5303                     break;
       
  5304                 case 35: /* VZIP */
       
  5305                     /* Reg  Before       After
       
  5306                        Rd   A3 A2 A1 A0  B1 A1 B0 A0
       
  5307                        Rm   B3 B2 B1 B0  B3 A3 B2 A2
       
  5308                      */
       
  5309                     if (size == 3)
       
  5310                         return 1;
       
  5311                     count = (q ? 4 : 2);
       
  5312                     for (n = 0; n < count; n++) {
       
  5313                         NEON_GET_REG(T0, rd, n);
       
  5314                         NEON_GET_REG(T1, rm, n);
       
  5315                         switch (size) {
       
  5316                         case 0: gen_helper_neon_zip_u8(); break;
       
  5317                         case 1: gen_helper_neon_zip_u16(); break;
       
  5318                         case 2: /* no-op */; break;
       
  5319                         default: abort();
       
  5320                         }
       
  5321                         gen_neon_movl_scratch_T0(n * 2);
       
  5322                         gen_neon_movl_scratch_T1(n * 2 + 1);
       
  5323                     }
       
  5324                     for (n = 0; n < count * 2; n++) {
       
  5325                         int reg = (n < count) ? rd : rm;
       
  5326                         gen_neon_movl_T0_scratch(n);
       
  5327                         NEON_SET_REG(T0, reg, n % count);
       
  5328                     }
       
  5329                     break;
       
  5330                 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
       
  5331                     if (size == 3)
       
  5332                         return 1;
       
  5333                     TCGV_UNUSED(tmp2);
       
  5334                     for (pass = 0; pass < 2; pass++) {
       
  5335                         neon_load_reg64(cpu_V0, rm + pass);
       
  5336                         tmp = new_tmp();
       
  5337                         if (op == 36 && q == 0) {
       
  5338                             gen_neon_narrow(size, tmp, cpu_V0);
       
  5339                         } else if (q) {
       
  5340                             gen_neon_narrow_satu(size, tmp, cpu_V0);
       
  5341                         } else {
       
  5342                             gen_neon_narrow_sats(size, tmp, cpu_V0);
       
  5343                         }
       
  5344                         if (pass == 0) {
       
  5345                             tmp2 = tmp;
       
  5346                         } else {
       
  5347                             neon_store_reg(rd, 0, tmp2);
       
  5348                             neon_store_reg(rd, 1, tmp);
       
  5349                         }
       
  5350                     }
       
  5351                     break;
       
  5352                 case 38: /* VSHLL */
       
  5353                     if (q || size == 3)
       
  5354                         return 1;
       
  5355                     tmp = neon_load_reg(rm, 0);
       
  5356                     tmp2 = neon_load_reg(rm, 1);
       
  5357                     for (pass = 0; pass < 2; pass++) {
       
  5358                         if (pass == 1)
       
  5359                             tmp = tmp2;
       
  5360                         gen_neon_widen(cpu_V0, tmp, size, 1);
       
  5361                         neon_store_reg64(cpu_V0, rd + pass);
       
  5362                     }
       
  5363                     break;
       
  5364                 case 44: /* VCVT.F16.F32 */
       
  5365                     tmp = new_tmp();
       
  5366                     tmp2 = new_tmp();
       
  5367                     tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
       
  5368                     gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
       
  5369                     tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
       
  5370                     gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
       
  5371                     tcg_gen_shli_i32(tmp2, tmp2, 16);
       
  5372                     tcg_gen_or_i32(tmp2, tmp2, tmp);
       
  5373                     tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
       
  5374                     gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
       
  5375                     tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
       
  5376                     neon_store_reg(rd, 0, tmp2);
       
  5377                     tmp2 = new_tmp();
       
  5378                     gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
       
  5379                     tcg_gen_shli_i32(tmp2, tmp2, 16);
       
  5380                     tcg_gen_or_i32(tmp2, tmp2, tmp);
       
  5381                     neon_store_reg(rd, 1, tmp2);
       
  5382                     dead_tmp(tmp);
       
  5383                     break;
       
  5384                 case 46: /* VCVT.F32.F16 */
       
  5385                     tmp3 = new_tmp();
       
  5386                     tmp = neon_load_reg(rm, 0);
       
  5387                     tmp2 = neon_load_reg(rm, 1);
       
  5388                     tcg_gen_ext16u_i32(tmp3, tmp);
       
  5389                     gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
       
  5390                     tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
       
  5391                     tcg_gen_shri_i32(tmp3, tmp, 16);
       
  5392                     gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
       
  5393                     tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
       
  5394                     dead_tmp(tmp);
       
  5395                     tcg_gen_ext16u_i32(tmp3, tmp2);
       
  5396                     gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
       
  5397                     tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
       
  5398                     tcg_gen_shri_i32(tmp3, tmp2, 16);
       
  5399                     gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
       
  5400                     tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
       
  5401                     dead_tmp(tmp2);
       
  5402                     dead_tmp(tmp3);
       
  5403                     break;
       
  5404                 default:
       
  5405                 elementwise:
       
  5406                     for (pass = 0; pass < (q ? 4 : 2); pass++) {
       
  5407                         if (op == 30 || op == 31 || op >= 58) {
       
  5408                             tcg_gen_ld_f32(cpu_F0s, cpu_env,
       
  5409                                            neon_reg_offset(rm, pass));
       
  5410                         } else {
       
  5411                             NEON_GET_REG(T0, rm, pass);
       
  5412                         }
       
  5413                         switch (op) {
       
  5414                         case 1: /* VREV32 */
       
  5415                             switch (size) {
       
  5416                             case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
       
  5417                             case 1: gen_swap_half(cpu_T[0]); break;
       
  5418                             default: return 1;
       
  5419                             }
       
  5420                             break;
       
  5421                         case 2: /* VREV16 */
       
  5422                             if (size != 0)
       
  5423                                 return 1;
       
  5424                             gen_rev16(cpu_T[0]);
       
  5425                             break;
       
  5426                         case 8: /* CLS */
       
  5427                             switch (size) {
       
  5428                             case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
       
  5429                             case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
       
  5430                             case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
       
  5431                             default: return 1;
       
  5432                             }
       
  5433                             break;
       
  5434                         case 9: /* CLZ */
       
  5435                             switch (size) {
       
  5436                             case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
       
  5437                             case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
       
  5438                             case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
       
  5439                             default: return 1;
       
  5440                             }
       
  5441                             break;
       
  5442                         case 10: /* CNT */
       
  5443                             if (size != 0)
       
  5444                                 return 1;
       
  5445                             gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
       
  5446                             break;
       
  5447                         case 11: /* VNOT */
       
  5448                             if (size != 0)
       
  5449                                 return 1;
       
  5450                             gen_op_notl_T0();
       
  5451                             break;
       
  5452                         case 14: /* VQABS */
       
  5453                             switch (size) {
       
  5454                             case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
       
  5455                             case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
       
  5456                             case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
       
  5457                             default: return 1;
       
  5458                             }
       
  5459                             break;
       
  5460                         case 15: /* VQNEG */
       
  5461                             switch (size) {
       
  5462                             case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
       
  5463                             case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
       
  5464                             case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
       
  5465                             default: return 1;
       
  5466                             }
       
  5467                             break;
       
  5468                         case 16: case 19: /* VCGT #0, VCLE #0 */
       
  5469                             gen_op_movl_T1_im(0);
       
  5470                             switch(size) {
       
  5471                             case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
       
  5472                             case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
       
  5473                             case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
       
  5474                             default: return 1;
       
  5475                             }
       
  5476                             if (op == 19)
       
  5477                                 gen_op_notl_T0();
       
  5478                             break;
       
  5479                         case 17: case 20: /* VCGE #0, VCLT #0 */
       
  5480                             gen_op_movl_T1_im(0);
       
  5481                             switch(size) {
       
  5482                             case 0: gen_helper_neon_cge_s8(CPU_T001); break;
       
  5483                             case 1: gen_helper_neon_cge_s16(CPU_T001); break;
       
  5484                             case 2: gen_helper_neon_cge_s32(CPU_T001); break;
       
  5485                             default: return 1;
       
  5486                             }
       
  5487                             if (op == 20)
       
  5488                                 gen_op_notl_T0();
       
  5489                             break;
       
  5490                         case 18: /* VCEQ #0 */
       
  5491                             gen_op_movl_T1_im(0);
       
  5492                             switch(size) {
       
  5493                             case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
       
  5494                             case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
       
  5495                             case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
       
  5496                             default: return 1;
       
  5497                             }
       
  5498                             break;
       
  5499                         case 22: /* VABS */
       
  5500                             switch(size) {
       
  5501                             case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
       
  5502                             case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
       
  5503                             case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
       
  5504                             default: return 1;
       
  5505                             }
       
  5506                             break;
       
  5507                         case 23: /* VNEG */
       
  5508                             gen_op_movl_T1_im(0);
       
  5509                             if (size == 3)
       
  5510                                 return 1;
       
  5511                             gen_neon_rsb(size);
       
  5512                             break;
       
  5513                         case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
       
  5514                             gen_op_movl_T1_im(0);
       
  5515                             gen_helper_neon_cgt_f32(CPU_T001);
       
  5516                             if (op == 27)
       
  5517                                 gen_op_notl_T0();
       
  5518                             break;
       
  5519                         case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
       
  5520                             gen_op_movl_T1_im(0);
       
  5521                             gen_helper_neon_cge_f32(CPU_T001);
       
  5522                             if (op == 28)
       
  5523                                 gen_op_notl_T0();
       
  5524                             break;
       
  5525                         case 26: /* Float VCEQ #0 */
       
  5526                             gen_op_movl_T1_im(0);
       
  5527                             gen_helper_neon_ceq_f32(CPU_T001);
       
  5528                             break;
       
  5529                         case 30: /* Float VABS */
       
  5530                             gen_vfp_abs(0);
       
  5531                             break;
       
  5532                         case 31: /* Float VNEG */
       
  5533                             gen_vfp_neg(0);
       
  5534                             break;
       
  5535                         case 32: /* VSWP */
       
  5536                             NEON_GET_REG(T1, rd, pass);
       
  5537                             NEON_SET_REG(T1, rm, pass);
       
  5538                             break;
       
  5539                         case 33: /* VTRN */
       
  5540                             NEON_GET_REG(T1, rd, pass);
       
  5541                             switch (size) {
       
  5542                             case 0: gen_helper_neon_trn_u8(); break;
       
  5543                             case 1: gen_helper_neon_trn_u16(); break;
       
  5544                             case 2: abort();
       
  5545                             default: return 1;
       
  5546                             }
       
  5547                             NEON_SET_REG(T1, rm, pass);
       
  5548                             break;
       
  5549                         case 56: /* Integer VRECPE */
       
  5550                             gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
       
  5551                             break;
       
  5552                         case 57: /* Integer VRSQRTE */
       
  5553                             gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
       
  5554                             break;
       
  5555                         case 58: /* Float VRECPE */
       
  5556                             gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
       
  5557                             break;
       
  5558                         case 59: /* Float VRSQRTE */
       
  5559                             gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
       
  5560                             break;
       
  5561                         case 60: /* VCVT.F32.S32 */
       
  5562                             gen_vfp_tosiz(0);
       
  5563                             break;
       
  5564                         case 61: /* VCVT.F32.U32 */
       
  5565                             gen_vfp_touiz(0);
       
  5566                             break;
       
  5567                         case 62: /* VCVT.S32.F32 */
       
  5568                             gen_vfp_sito(0);
       
  5569                             break;
       
  5570                         case 63: /* VCVT.U32.F32 */
       
  5571                             gen_vfp_uito(0);
       
  5572                             break;
       
  5573                         default:
       
  5574                             /* Reserved: 21, 29, 39-56 */
       
  5575                             return 1;
       
  5576                         }
       
  5577                         if (op == 30 || op == 31 || op >= 58) {
       
  5578                             tcg_gen_st_f32(cpu_F0s, cpu_env,
       
  5579                                            neon_reg_offset(rd, pass));
       
  5580                         } else {
       
  5581                             NEON_SET_REG(T0, rd, pass);
       
  5582                         }
       
  5583                     }
       
  5584                     break;
       
  5585                 }
       
  5586             } else if ((insn & (1 << 10)) == 0) {
       
  5587                 /* VTBL, VTBX.  */
       
  5588                 n = ((insn >> 5) & 0x18) + 8;
       
  5589                 if (insn & (1 << 6)) {
       
  5590                     tmp = neon_load_reg(rd, 0);
       
  5591                 } else {
       
  5592                     tmp = new_tmp();
       
  5593                     tcg_gen_movi_i32(tmp, 0);
       
  5594                 }
       
  5595                 tmp2 = neon_load_reg(rm, 0);
       
  5596                 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
       
  5597                                     tcg_const_i32(n));
       
  5598                 dead_tmp(tmp);
       
  5599                 if (insn & (1 << 6)) {
       
  5600                     tmp = neon_load_reg(rd, 1);
       
  5601                 } else {
       
  5602                     tmp = new_tmp();
       
  5603                     tcg_gen_movi_i32(tmp, 0);
       
  5604                 }
       
  5605                 tmp3 = neon_load_reg(rm, 1);
       
  5606                 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
       
  5607                                     tcg_const_i32(n));
       
  5608                 neon_store_reg(rd, 0, tmp2);
       
  5609                 neon_store_reg(rd, 1, tmp3);
       
  5610                 dead_tmp(tmp);
       
  5611             } else if ((insn & 0x380) == 0) {
       
  5612                 /* VDUP */
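                 /* VDUP (scalar): replicate one element of Dm across the
                    whole destination.  Bit 16 selects a byte element,
                    bit 17 a halfword, otherwise a word; the remaining high
                    bits give the element index.  */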
       
  5613                 if (insn & (1 << 19)) {
       
  5614                 NEON_GET_REG(T0, rm, 1);
       
  5615                 } else {
       
  5616                 NEON_GET_REG(T0, rm, 0);
       
  5617                 }
       
  5618                 if (insn & (1 << 16)) {
       
  5619                     gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
       
  5620                 } else if (insn & (1 << 17)) {
       
  5621                     if ((insn >> 18) & 1)
       
  5622                         gen_neon_dup_high16(cpu_T[0]);
       
  5623                     else
       
  5624                         gen_neon_dup_low16(cpu_T[0]);
       
  5625                 }
       
  5626                 for (pass = 0; pass < (q ? 4 : 2); pass++) {
       
  5627                     NEON_SET_REG(T0, rd, pass);
       
  5628                 }
       
  5629             } else {
       
  5630                 return 1;
       
  5631             }
       
  5632         }
       
  5633     }
       
  5634     return 0;
       
  5635 }
       
  5636 
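 /* Coprocessor 14 reads: only the ThumbEE registers (TEECR and TEEHBR)
    are handled here; any other encoding is reported on stderr and treated
    as an undefined instruction by returning 1.  */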
       
  5637 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
       
  5638 {
       
  5639     int crn = (insn >> 16) & 0xf;
       
  5640     int crm = insn & 0xf;
       
  5641     int op1 = (insn >> 21) & 7;
       
  5642     int op2 = (insn >> 5) & 7;
       
  5643     int rt = (insn >> 12) & 0xf;
       
  5644     TCGv tmp;
       
  5645 
       
  5646     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
       
  5647         if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
       
  5648             /* TEECR */
       
  5649             if (IS_USER(s))
       
  5650                 return 1;
       
  5651             tmp = load_cpu_field(teecr);
       
  5652             store_reg(s, rt, tmp);
       
  5653             return 0;
       
  5654         }
       
  5655         if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
       
  5656             /* TEEHBR */
       
  5657             if (IS_USER(s) && (env->teecr & 1))
       
  5658                 return 1;
       
  5659             tmp = load_cpu_field(teehbr);
       
  5660             store_reg(s, rt, tmp);
       
  5661             return 0;
       
  5662         }
       
  5663     }
       
  5664     fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
       
  5665             op1, crn, crm, op2);
       
  5666     return 1;
       
  5667 }
       
  5668 
       
  5669 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
       
  5670 {
       
  5671     int crn = (insn >> 16) & 0xf;
       
  5672     int crm = insn & 0xf;
       
  5673     int op1 = (insn >> 21) & 7;
       
  5674     int op2 = (insn >> 5) & 7;
       
  5675     int rt = (insn >> 12) & 0xf;
       
  5676     TCGv tmp;
       
  5677 
       
  5678     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
       
  5679         if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
       
  5680             /* TEECR */
       
  5681             if (IS_USER(s))
       
  5682                 return 1;
       
  5683             tmp = load_reg(s, rt);
       
  5684             gen_helper_set_teecr(cpu_env, tmp);
       
  5685             dead_tmp(tmp);
       
  5686             return 0;
       
  5687         }
       
  5688         if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
       
  5689             /* TEEHBR */
       
  5690             if (IS_USER(s) && (env->teecr & 1))
       
  5691                 return 1;
       
  5692             tmp = load_reg(s, rt);
       
  5693             store_cpu_field(tmp, teehbr);
       
  5694             return 0;
       
  5695         }
       
  5696     }
       
  5697     fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
       
  5698             op1, crn, crm, op2);
       
  5699     return 1;
       
  5700 }
       
  5701 
       
  5702 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
       
  5703 {
       
  5704     int cpnum;
       
  5705 
       
  5706     cpnum = (insn >> 8) & 0xf;
       
  5707     if (arm_feature(env, ARM_FEATURE_XSCALE)
       
  5708 	    && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
       
  5709 	return 1;
       
  5710 
       
  5711     switch (cpnum) {
       
  5712       case 0:
       
  5713       case 1:
       
  5714 	if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
       
  5715 	    return disas_iwmmxt_insn(env, s, insn);
       
  5716 	} else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
       
  5717 	    return disas_dsp_insn(env, s, insn);
       
  5718 	}
       
  5719 	return 1;
       
  5720     case 10:
       
  5721     case 11:
       
  5722 	return disas_vfp_insn (env, s, insn);
       
  5723     case 14:
       
  5724         /* Coprocessors 7-15 are architecturally reserved by ARM.
       
  5725            Unfortunately Intel decided to ignore this.  */
       
  5726         if (arm_feature(env, ARM_FEATURE_XSCALE))
       
  5727             goto board;
       
  5728         if (insn & (1 << 20))
       
  5729             return disas_cp14_read(env, s, insn);
       
  5730         else
       
  5731             return disas_cp14_write(env, s, insn);
       
  5732     case 15:
       
  5733 	return disas_cp15_insn (env, s, insn);
       
  5734     default:
       
  5735     board:
       
  5736 	/* Unknown coprocessor.  See if the board has hooked it.  */
       
  5737 	return disas_cp_insn (env, s, insn);
       
  5738     }
       
  5739 }
       
  5740 
       
  5741 
       
  5742 /* Store a 64-bit value to a register pair.  Clobbers val.  */
       
  5743 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
       
  5744 {
       
  5745     TCGv tmp;
       
  5746     tmp = new_tmp();
       
  5747     tcg_gen_trunc_i64_i32(tmp, val);
       
  5748     store_reg(s, rlow, tmp);
       
  5749     tmp = new_tmp();
       
  5750     tcg_gen_shri_i64(val, val, 32);
       
  5751     tcg_gen_trunc_i64_i32(tmp, val);
       
  5752     store_reg(s, rhigh, tmp);
       
  5753 }
       
  5754 
       
  5755 /* load a 32-bit value from a register and perform a 64-bit accumulate.  */
       
  5756 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
       
  5757 {
       
  5758     TCGv_i64 tmp;
       
  5759     TCGv tmp2;
       
  5760 
       
  5761     /* Load value and extend to 64 bits.  */
       
  5762     tmp = tcg_temp_new_i64();
       
  5763     tmp2 = load_reg(s, rlow);
       
  5764     tcg_gen_extu_i32_i64(tmp, tmp2);
       
  5765     dead_tmp(tmp2);
       
  5766     tcg_gen_add_i64(val, val, tmp);
       
  5767 }
       
  5768 
       
  5769 /* load and add a 64-bit value from a register pair.  */
       
  5770 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
       
  5771 {
       
  5772     TCGv_i64 tmp;
       
  5773     TCGv tmpl;
       
  5774     TCGv tmph;
       
  5775 
       
  5776     /* Load 64-bit value rd:rn.  */
       
  5777     tmpl = load_reg(s, rlow);
       
  5778     tmph = load_reg(s, rhigh);
       
  5779     tmp = tcg_temp_new_i64();
       
  5780     tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
       
  5781     dead_tmp(tmpl);
       
  5782     dead_tmp(tmph);
       
  5783     tcg_gen_add_i64(val, val, tmp);
       
  5784 }
       
  5785 
       
  5786 /* Set N and Z flags from a 64-bit value.  */
       
  5787 static void gen_logicq_cc(TCGv_i64 val)
       
  5788 {
       
  5789     TCGv tmp = new_tmp();
       
  5790     gen_helper_logicq_cc(tmp, val);
       
  5791     gen_logic_CC(tmp);
       
  5792     dead_tmp(tmp);
       
  5793 }
       
  5794 
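 /* Decode and translate one 32-bit ARM instruction at s->pc.  The word is
    byte-swapped when the code is stored in the opposite endianness,
    M-profile cores reject ARM mode entirely, and a condition field of 0xF
    selects the unconditional instruction space.  */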
       
  5795 static void disas_arm_insn(CPUState * env, DisasContext *s)
       
  5796 {
       
  5797     unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
       
  5798     TCGv tmp;
       
  5799     TCGv tmp2;
       
  5800     TCGv tmp3;
       
  5801     TCGv addr;
       
  5802     TCGv_i64 tmp64;
       
  5803 
       
  5804     insn = ldl_code(s->pc);
       
  5805     if (env->bswap_code)
       
  5806         insn = bswap32(insn);
       
  5807     s->pc += 4;
       
  5808 
       
  5809     /* M variants do not implement ARM mode.  */
       
  5810     if (IS_M(env))
       
  5811         goto illegal_op;
       
  5812     cond = insn >> 28;
       
  5813     if (cond == 0xf){
       
  5814         /* Unconditional instructions.  */
       
  5815         if (((insn >> 25) & 7) == 1) {
       
  5816             /* NEON Data processing.  */
       
  5817             if (!arm_feature(env, ARM_FEATURE_NEON))
       
  5818                 goto illegal_op;
       
  5819 
       
  5820             if (disas_neon_data_insn(env, s, insn))
       
  5821                 goto illegal_op;
       
  5822             return;
       
  5823         }
       
  5824         if ((insn & 0x0f100000) == 0x04000000) {
       
  5825             /* NEON load/store.  */
       
  5826             if (!arm_feature(env, ARM_FEATURE_NEON))
       
  5827                 goto illegal_op;
       
  5828 
       
  5829             if (disas_neon_ls_insn(env, s, insn))
       
  5830                 goto illegal_op;
       
  5831             return;
       
  5832         }
       
  5833         if ((insn & 0x0d70f000) == 0x0550f000)
       
  5834             return; /* PLD */
       
  5835         else if ((insn & 0x0ffffdff) == 0x01010000) {
       
  5836             ARCH(6);
       
  5837             /* setend */
       
  5838             if (insn & (1 << 9)) {
       
  5839                 /* BE8 mode not implemented.  */
       
  5840                 goto illegal_op;
       
  5841             }
       
  5842             return;
       
  5843         } else if ((insn & 0x0fffff00) == 0x057ff000) {
       
  5844             switch ((insn >> 4) & 0xf) {
       
  5845             case 1: /* clrex */
       
  5846                 ARCH(6K);
       
  5847                 gen_helper_clrex(cpu_env);
       
  5848                 return;
       
  5849             case 4: /* dsb */
       
  5850             case 5: /* dmb */
       
  5851             case 6: /* isb */
       
  5852                 ARCH(7);
       
  5853                 /* We don't emulate caches so these are a no-op.  */
       
  5854                 return;
       
  5855             default:
       
  5856                 goto illegal_op;
       
  5857             }
       
  5858         } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
       
  5859             /* srs */
       
  5860             uint32_t offset;
       
  5861             if (IS_USER(s))
       
  5862                 goto illegal_op;
       
  5863             ARCH(6);
       
  5864             op1 = (insn & 0x1f);
       
  5865             if (op1 == (env->uncached_cpsr & CPSR_M)) {
       
  5866                 addr = load_reg(s, 13);
       
  5867             } else {
       
  5868                 addr = new_tmp();
       
  5869                 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
       
  5870             }
       
  5871             i = (insn >> 23) & 3;
       
  5872             switch (i) {
       
  5873             case 0: offset = -4; break; /* DA */
       
  5874             case 1: offset = -8; break; /* DB */
       
  5875             case 2: offset = 0; break; /* IA */
       
  5876             case 3: offset = 4; break; /* IB */
       
  5877             default: abort();
       
  5878             }
       
  5879             if (offset)
       
  5880                 tcg_gen_addi_i32(addr, addr, offset);
       
  5881             tmp = load_reg(s, 14);
       
  5882             gen_st32(tmp, addr, 0);
       
  5883             tmp = new_tmp();
       
  5884             gen_helper_cpsr_read(tmp);
       
  5885             tcg_gen_addi_i32(addr, addr, 4);
       
  5886             gen_st32(tmp, addr, 0);
       
  5887             if (insn & (1 << 21)) {
       
  5888                 /* Base writeback.  */
       
  5889                 switch (i) {
       
  5890                 case 0: offset = -8; break;
       
  5891                 case 1: offset = -4; break;
       
  5892                 case 2: offset = 4; break;
       
  5893                 case 3: offset = 0; break;
       
  5894                 default: abort();
       
  5895                 }
       
   5896                 if (offset)
       
   5897                     tcg_gen_addi_i32(addr, addr, offset);
       
   5898                 if (op1 == (env->uncached_cpsr & CPSR_M)) {
       
   5899                     store_reg(s, 13, addr);
       
   5900                 } else {
       
   5901                     gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
       
                            dead_tmp(addr);
       
   5902                 }
       
  5903             } else {
       
  5904                 dead_tmp(addr);
       
  5905             }
       
  5906         } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
       
  5907             /* rfe */
       
  5908             uint32_t offset;
       
  5909             if (IS_USER(s))
       
  5910                 goto illegal_op;
       
  5911             ARCH(6);
       
  5912             rn = (insn >> 16) & 0xf;
       
  5913             addr = load_reg(s, rn);
       
  5914             i = (insn >> 23) & 3;
       
  5915             switch (i) {
       
  5916             case 0: offset = -4; break; /* DA */
       
  5917             case 1: offset = -8; break; /* DB */
       
  5918             case 2: offset = 0; break; /* IA */
       
  5919             case 3: offset = 4; break; /* IB */
       
  5920             default: abort();
       
  5921             }
       
  5922             if (offset)
       
  5923                 tcg_gen_addi_i32(addr, addr, offset);
       
  5924             /* Load PC into tmp and CPSR into tmp2.  */
       
  5925             tmp = gen_ld32(addr, 0);
       
  5926             tcg_gen_addi_i32(addr, addr, 4);
       
  5927             tmp2 = gen_ld32(addr, 0);
       
  5928             if (insn & (1 << 21)) {
       
  5929                 /* Base writeback.  */
       
  5930                 switch (i) {
       
  5931                 case 0: offset = -8; break;
       
  5932                 case 1: offset = -4; break;
       
  5933                 case 2: offset = 4; break;
       
  5934                 case 3: offset = 0; break;
       
  5935                 default: abort();
       
  5936                 }
       
  5937                 if (offset)
       
  5938                     tcg_gen_addi_i32(addr, addr, offset);
       
  5939                 store_reg(s, rn, addr);
       
  5940             } else {
       
  5941                 dead_tmp(addr);
       
  5942             }
       
  5943             gen_rfe(s, tmp, tmp2);
       
  5944         } else if ((insn & 0x0e000000) == 0x0a000000) {
       
  5945             /* branch link and change to thumb (blx <offset>) */
       
  5946             int32_t offset;
       
  5947 
       
  5948             val = (uint32_t)s->pc;
       
  5949             tmp = new_tmp();
       
  5950             tcg_gen_movi_i32(tmp, val);
       
  5951             store_reg(s, 14, tmp);
       
  5952             /* Sign-extend the 24-bit offset */
       
  5953             offset = (((int32_t)insn) << 8) >> 8;
       
  5954             /* offset * 4 + bit24 * 2 + (thumb bit) */
       
  5955             val += (offset << 2) | ((insn >> 23) & 2) | 1;
       
  5956             /* pipeline offset */
       
  5957             val += 4;
       
  5958             gen_bx_im(s, val);
       
  5959             return;
       
  5960         } else if ((insn & 0x0e000f00) == 0x0c000100) {
       
  5961             if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
       
  5962                 /* iWMMXt register transfer.  */
       
  5963                 if (env->cp15.c15_cpar & (1 << 1))
       
  5964                     if (!disas_iwmmxt_insn(env, s, insn))
       
  5965                         return;
       
  5966             }
       
  5967         } else if ((insn & 0x0fe00000) == 0x0c400000) {
       
  5968             /* Coprocessor double register transfer.  */
       
  5969         } else if ((insn & 0x0f000010) == 0x0e000010) {
       
  5970             /* Additional coprocessor register transfer.  */
       
  5971         } else if ((insn & 0x0ff10020) == 0x01000000) {
       
  5972             uint32_t mask;
       
  5973             uint32_t val;
       
  5974             /* cps (privileged) */
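                    /* CPS: bit 19 enables A/I/F changes (bits 8/7/6 pick the flags,
                       bit 18 set means disable, i.e. set the mask bits); bit 17 plus
                       bits [4:0] optionally switch the processor mode. */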
       
  5975             if (IS_USER(s))
       
  5976                 return;
       
  5977             mask = val = 0;
       
  5978             if (insn & (1 << 19)) {
       
  5979                 if (insn & (1 << 8))
       
  5980                     mask |= CPSR_A;
       
  5981                 if (insn & (1 << 7))
       
  5982                     mask |= CPSR_I;
       
  5983                 if (insn & (1 << 6))
       
  5984                     mask |= CPSR_F;
       
  5985                 if (insn & (1 << 18))
       
  5986                     val |= mask;
       
  5987             }
       
  5988             if (insn & (1 << 17)) {
       
  5989                 mask |= CPSR_M;
       
  5990                 val |= (insn & 0x1f);
       
  5991             }
       
  5992             if (mask) {
       
  5993                 gen_op_movl_T0_im(val);
       
  5994                 gen_set_psr_T0(s, mask, 0);
       
  5995             }
       
  5996             return;
       
  5997         }
       
  5998         goto illegal_op;
       
  5999     }
       
  6000     if (cond != 0xe) {
       
   6001         /* If the condition is not AL (always), generate a conditional
       
   6002            jump to the next instruction.  */
       
  6003         s->condlabel = gen_new_label();
       
  6004         gen_test_cc(cond ^ 1, s->condlabel);
       
  6005         s->condjmp = 1;
       
  6006     }
       
  6007     if ((insn & 0x0f900000) == 0x03000000) {
       
  6008         if ((insn & (1 << 21)) == 0) {
       
  6009             ARCH(6T2);
       
  6010             rd = (insn >> 12) & 0xf;
       
  6011             val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
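                    /* imm16 is split as imm4:imm12; MOVW zero-extends it into rd,
                       MOVT keeps rd's low halfword and replaces the top halfword. */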
       
  6012             if ((insn & (1 << 22)) == 0) {
       
  6013                 /* MOVW */
       
  6014                 tmp = new_tmp();
       
  6015                 tcg_gen_movi_i32(tmp, val);
       
  6016             } else {
       
  6017                 /* MOVT */
       
  6018                 tmp = load_reg(s, rd);
       
  6019                 tcg_gen_ext16u_i32(tmp, tmp);
       
  6020                 tcg_gen_ori_i32(tmp, tmp, val << 16);
       
  6021             }
       
  6022             store_reg(s, rd, tmp);
       
  6023         } else {
       
  6024             if (((insn >> 12) & 0xf) != 0xf)
       
  6025                 goto illegal_op;
       
  6026             if (((insn >> 16) & 0xf) == 0) {
       
  6027                 gen_nop_hint(s, insn & 0xff);
       
  6028             } else {
       
  6029                 /* CPSR = immediate */
       
  6030                 val = insn & 0xff;
       
  6031                 shift = ((insn >> 8) & 0xf) * 2;
       
  6032                 if (shift)
       
  6033                     val = (val >> shift) | (val << (32 - shift));
       
  6034                 gen_op_movl_T0_im(val);
       
  6035                 i = ((insn & (1 << 22)) != 0);
       
  6036                 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
       
  6037                     goto illegal_op;
       
  6038             }
       
  6039         }
       
  6040     } else if ((insn & 0x0f900000) == 0x01000000
       
  6041                && (insn & 0x00000090) != 0x00000090) {
       
  6042         /* miscellaneous instructions */
       
  6043         op1 = (insn >> 21) & 3;
       
  6044         sh = (insn >> 4) & 0xf;
       
  6045         rm = insn & 0xf;
       
  6046         switch (sh) {
       
  6047         case 0x0: /* move program status register */
       
  6048             if (op1 & 1) {
       
  6049                 /* PSR = reg */
       
  6050                 gen_movl_T0_reg(s, rm);
       
  6051                 i = ((op1 & 2) != 0);
       
  6052                 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
       
  6053                     goto illegal_op;
       
  6054             } else {
       
  6055                 /* reg = PSR */
       
  6056                 rd = (insn >> 12) & 0xf;
       
  6057                 if (op1 & 2) {
       
  6058                     if (IS_USER(s))
       
  6059                         goto illegal_op;
       
  6060                     tmp = load_cpu_field(spsr);
       
  6061                 } else {
       
  6062                     tmp = new_tmp();
       
  6063                     gen_helper_cpsr_read(tmp);
       
  6064                 }
       
  6065                 store_reg(s, rd, tmp);
       
  6066             }
       
  6067             break;
       
  6068         case 0x1:
       
  6069             if (op1 == 1) {
       
  6070                 /* branch/exchange thumb (bx).  */
       
  6071                 tmp = load_reg(s, rm);
       
  6072                 gen_bx(s, tmp);
       
  6073             } else if (op1 == 3) {
       
  6074                 /* clz */
       
  6075                 rd = (insn >> 12) & 0xf;
       
  6076                 tmp = load_reg(s, rm);
       
  6077                 gen_helper_clz(tmp, tmp);
       
  6078                 store_reg(s, rd, tmp);
       
  6079             } else {
       
  6080                 goto illegal_op;
       
  6081             }
       
  6082             break;
       
  6083         case 0x2:
       
  6084             if (op1 == 1) {
       
  6085                 ARCH(5J); /* bxj */
       
  6086                 /* Trivial implementation equivalent to bx.  */
       
  6087                 tmp = load_reg(s, rm);
       
  6088                 gen_bx(s, tmp);
       
  6089             } else {
       
  6090                 goto illegal_op;
       
  6091             }
       
  6092             break;
       
  6093         case 0x3:
       
  6094             if (op1 != 1)
       
   6095                 goto illegal_op;
       
  6096 
       
  6097             /* branch link/exchange thumb (blx) */
       
  6098             tmp = load_reg(s, rm);
       
  6099             tmp2 = new_tmp();
       
  6100             tcg_gen_movi_i32(tmp2, s->pc);
       
  6101             store_reg(s, 14, tmp2);
       
  6102             gen_bx(s, tmp);
       
  6103             break;
       
  6104         case 0x5: /* saturating add/subtract */
       
  6105             rd = (insn >> 12) & 0xf;
       
  6106             rn = (insn >> 16) & 0xf;
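                    /* QADD/QSUB/QDADD/QDSUB: bit 22 doubles (with saturation) the
                       second operand, bit 21 selects subtract instead of add. */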
       
  6107             tmp = load_reg(s, rm);
       
  6108             tmp2 = load_reg(s, rn);
       
  6109             if (op1 & 2)
       
  6110                 gen_helper_double_saturate(tmp2, tmp2);
       
  6111             if (op1 & 1)
       
  6112                 gen_helper_sub_saturate(tmp, tmp, tmp2);
       
  6113             else
       
  6114                 gen_helper_add_saturate(tmp, tmp, tmp2);
       
  6115             dead_tmp(tmp2);
       
  6116             store_reg(s, rd, tmp);
       
  6117             break;
       
  6118         case 7: /* bkpt */
       
  6119             gen_set_condexec(s);
       
  6120             gen_set_pc_im(s->pc - 4);
       
  6121             gen_exception(EXCP_BKPT);
       
  6122             s->is_jmp = DISAS_JUMP;
       
  6123             break;
       
  6124         case 0x8: /* signed multiply */
       
  6125         case 0xa:
       
  6126         case 0xc:
       
  6127         case 0xe:
       
  6128             rs = (insn >> 8) & 0xf;
       
  6129             rn = (insn >> 12) & 0xf;
       
  6130             rd = (insn >> 16) & 0xf;
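                    /* Halfword multiplies: op1 0=SMLAxy, 1=SMULWy/SMLAWy,
                       2=SMLALxy, 3=SMULxy; insn bits 5 and 6 select the
                       operand halfwords. */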
       
  6131             if (op1 == 1) {
       
  6132                 /* (32 * 16) >> 16 */
       
  6133                 tmp = load_reg(s, rm);
       
  6134                 tmp2 = load_reg(s, rs);
       
  6135                 if (sh & 4)
       
  6136                     tcg_gen_sari_i32(tmp2, tmp2, 16);
       
  6137                 else
       
  6138                     gen_sxth(tmp2);
       
  6139                 tmp64 = gen_muls_i64_i32(tmp, tmp2);
       
  6140                 tcg_gen_shri_i64(tmp64, tmp64, 16);
       
  6141                 tmp = new_tmp();
       
  6142                 tcg_gen_trunc_i64_i32(tmp, tmp64);
       
  6143                 if ((sh & 2) == 0) {
       
  6144                     tmp2 = load_reg(s, rn);
       
  6145                     gen_helper_add_setq(tmp, tmp, tmp2);
       
  6146                     dead_tmp(tmp2);
       
  6147                 }
       
  6148                 store_reg(s, rd, tmp);
       
  6149             } else {
       
  6150                 /* 16 * 16 */
       
  6151                 tmp = load_reg(s, rm);
       
  6152                 tmp2 = load_reg(s, rs);
       
  6153                 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
       
  6154                 dead_tmp(tmp2);
       
  6155                 if (op1 == 2) {
       
  6156                     tmp64 = tcg_temp_new_i64();
       
  6157                     tcg_gen_ext_i32_i64(tmp64, tmp);
       
  6158                     dead_tmp(tmp);
       
  6159                     gen_addq(s, tmp64, rn, rd);
       
  6160                     gen_storeq_reg(s, rn, rd, tmp64);
       
  6161                 } else {
       
  6162                     if (op1 == 0) {
       
  6163                         tmp2 = load_reg(s, rn);
       
  6164                         gen_helper_add_setq(tmp, tmp, tmp2);
       
  6165                         dead_tmp(tmp2);
       
  6166                     }
       
  6167                     store_reg(s, rd, tmp);
       
  6168                 }
       
  6169             }
       
  6170             break;
       
  6171         default:
       
  6172             goto illegal_op;
       
  6173         }
       
  6174     } else if (((insn & 0x0e000000) == 0 &&
       
  6175                 (insn & 0x00000090) != 0x90) ||
       
  6176                ((insn & 0x0e000000) == (1 << 25))) {
       
  6177         int set_cc, logic_cc, shiftop;
       
  6178 
       
  6179         op1 = (insn >> 21) & 0xf;
       
  6180         set_cc = (insn >> 20) & 1;
       
  6181         logic_cc = table_logic_cc[op1] & set_cc;
       
  6182 
       
  6183         /* data processing instruction */
       
  6184         if (insn & (1 << 25)) {
       
  6185             /* immediate operand */
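                    /* An 8-bit value rotated right by twice the 4-bit rotate
                       field. */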
       
  6186             val = insn & 0xff;
       
  6187             shift = ((insn >> 8) & 0xf) * 2;
       
  6188             if (shift)
       
  6189                 val = (val >> shift) | (val << (32 - shift));
       
  6190             gen_op_movl_T1_im(val);
       
  6191             if (logic_cc && shift)
       
  6192                 gen_set_CF_bit31(cpu_T[1]);
       
  6193         } else {
       
  6194             /* register */
       
  6195             rm = (insn) & 0xf;
       
  6196             gen_movl_T1_reg(s, rm);
       
  6197             shiftop = (insn >> 5) & 3;
       
  6198             if (!(insn & (1 << 4))) {
       
  6199                 shift = (insn >> 7) & 0x1f;
       
  6200                 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
       
  6201             } else {
       
  6202                 rs = (insn >> 8) & 0xf;
       
  6203                 tmp = load_reg(s, rs);
       
  6204                 gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
       
  6205             }
       
  6206         }
       
  6207         if (op1 != 0x0f && op1 != 0x0d) {
       
  6208             rn = (insn >> 16) & 0xf;
       
  6209             gen_movl_T0_reg(s, rn);
       
  6210         }
       
  6211         rd = (insn >> 12) & 0xf;
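                /* op1: 0=AND 1=EOR 2=SUB 3=RSB 4=ADD 5=ADC 6=SBC 7=RSC
                        8=TST 9=TEQ a=CMP b=CMN c=ORR d=MOV e=BIC f=MVN */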
       
  6212         switch(op1) {
       
  6213         case 0x00:
       
  6214             gen_op_andl_T0_T1();
       
  6215             gen_movl_reg_T0(s, rd);
       
  6216             if (logic_cc)
       
  6217                 gen_op_logic_T0_cc();
       
  6218             break;
       
  6219         case 0x01:
       
  6220             gen_op_xorl_T0_T1();
       
  6221             gen_movl_reg_T0(s, rd);
       
  6222             if (logic_cc)
       
  6223                 gen_op_logic_T0_cc();
       
  6224             break;
       
  6225         case 0x02:
       
  6226             if (set_cc && rd == 15) {
       
  6227                 /* SUBS r15, ... is used for exception return.  */
       
  6228                 if (IS_USER(s))
       
  6229                     goto illegal_op;
       
  6230                 gen_op_subl_T0_T1_cc();
       
  6231                 gen_exception_return(s);
       
  6232             } else {
       
  6233                 if (set_cc)
       
  6234                     gen_op_subl_T0_T1_cc();
       
  6235                 else
       
  6236                     gen_op_subl_T0_T1();
       
  6237                 gen_movl_reg_T0(s, rd);
       
  6238             }
       
  6239             break;
       
  6240         case 0x03:
       
  6241             if (set_cc)
       
  6242                 gen_op_rsbl_T0_T1_cc();
       
  6243             else
       
  6244                 gen_op_rsbl_T0_T1();
       
  6245             gen_movl_reg_T0(s, rd);
       
  6246             break;
       
  6247         case 0x04:
       
  6248             if (set_cc)
       
  6249                 gen_op_addl_T0_T1_cc();
       
  6250             else
       
  6251                 gen_op_addl_T0_T1();
       
  6252             gen_movl_reg_T0(s, rd);
       
  6253             break;
       
  6254         case 0x05:
       
  6255             if (set_cc)
       
  6256                 gen_op_adcl_T0_T1_cc();
       
  6257             else
       
  6258                 gen_adc_T0_T1();
       
  6259             gen_movl_reg_T0(s, rd);
       
  6260             break;
       
  6261         case 0x06:
       
  6262             if (set_cc)
       
  6263                 gen_op_sbcl_T0_T1_cc();
       
  6264             else
       
  6265                 gen_sbc_T0_T1();
       
  6266             gen_movl_reg_T0(s, rd);
       
  6267             break;
       
  6268         case 0x07:
       
  6269             if (set_cc)
       
  6270                 gen_op_rscl_T0_T1_cc();
       
  6271             else
       
  6272                 gen_rsc_T0_T1();
       
  6273             gen_movl_reg_T0(s, rd);
       
  6274             break;
       
  6275         case 0x08:
       
  6276             if (set_cc) {
       
  6277                 gen_op_andl_T0_T1();
       
  6278                 gen_op_logic_T0_cc();
       
  6279             }
       
  6280             break;
       
  6281         case 0x09:
       
  6282             if (set_cc) {
       
  6283                 gen_op_xorl_T0_T1();
       
  6284                 gen_op_logic_T0_cc();
       
  6285             }
       
  6286             break;
       
  6287         case 0x0a:
       
  6288             if (set_cc) {
       
  6289                 gen_op_subl_T0_T1_cc();
       
  6290             }
       
  6291             break;
       
  6292         case 0x0b:
       
  6293             if (set_cc) {
       
  6294                 gen_op_addl_T0_T1_cc();
       
  6295             }
       
  6296             break;
       
  6297         case 0x0c:
       
  6298             gen_op_orl_T0_T1();
       
  6299             gen_movl_reg_T0(s, rd);
       
  6300             if (logic_cc)
       
  6301                 gen_op_logic_T0_cc();
       
  6302             break;
       
  6303         case 0x0d:
       
  6304             if (logic_cc && rd == 15) {
       
  6305                 /* MOVS r15, ... is used for exception return.  */
       
  6306                 if (IS_USER(s))
       
  6307                     goto illegal_op;
       
  6308                 gen_op_movl_T0_T1();
       
  6309                 gen_exception_return(s);
       
  6310             } else {
       
  6311                 gen_movl_reg_T1(s, rd);
       
  6312                 if (logic_cc)
       
  6313                     gen_op_logic_T1_cc();
       
  6314             }
       
  6315             break;
       
  6316         case 0x0e:
       
  6317             gen_op_bicl_T0_T1();
       
  6318             gen_movl_reg_T0(s, rd);
       
  6319             if (logic_cc)
       
  6320                 gen_op_logic_T0_cc();
       
  6321             break;
       
  6322         default:
       
  6323         case 0x0f:
       
  6324             gen_op_notl_T1();
       
  6325             gen_movl_reg_T1(s, rd);
       
  6326             if (logic_cc)
       
  6327                 gen_op_logic_T1_cc();
       
  6328             break;
       
  6329         }
       
  6330     } else {
       
  6331         /* other instructions */
       
  6332         op1 = (insn >> 24) & 0xf;
       
  6333         switch(op1) {
       
  6334         case 0x0:
       
  6335         case 0x1:
       
  6336             /* multiplies, extra load/stores */
       
  6337             sh = (insn >> 5) & 3;
       
  6338             if (sh == 0) {
       
  6339                 if (op1 == 0x0) {
       
  6340                     rd = (insn >> 16) & 0xf;
       
  6341                     rn = (insn >> 12) & 0xf;
       
  6342                     rs = (insn >> 8) & 0xf;
       
  6343                     rm = (insn) & 0xf;
       
  6344                     op1 = (insn >> 20) & 0xf;
       
  6345                     switch (op1) {
       
  6346                     case 0: case 1: case 2: case 3: case 6:
       
  6347                         /* 32 bit mul */
       
  6348                         tmp = load_reg(s, rs);
       
  6349                         tmp2 = load_reg(s, rm);
       
  6350                         tcg_gen_mul_i32(tmp, tmp, tmp2);
       
  6351                         dead_tmp(tmp2);
       
  6352                         if (insn & (1 << 22)) {
       
  6353                             /* Subtract (mls) */
       
  6354                             ARCH(6T2);
       
  6355                             tmp2 = load_reg(s, rn);
       
  6356                             tcg_gen_sub_i32(tmp, tmp2, tmp);
       
  6357                             dead_tmp(tmp2);
       
  6358                         } else if (insn & (1 << 21)) {
       
  6359                             /* Add */
       
  6360                             tmp2 = load_reg(s, rn);
       
  6361                             tcg_gen_add_i32(tmp, tmp, tmp2);
       
  6362                             dead_tmp(tmp2);
       
  6363                         }
       
  6364                         if (insn & (1 << 20))
       
  6365                             gen_logic_CC(tmp);
       
  6366                         store_reg(s, rd, tmp);
       
  6367                         break;
       
  6368                     default:
       
  6369                         /* 64 bit mul */
       
  6370                         tmp = load_reg(s, rs);
       
  6371                         tmp2 = load_reg(s, rm);
       
  6372                         if (insn & (1 << 22))
       
  6373                             tmp64 = gen_muls_i64_i32(tmp, tmp2);
       
  6374                         else
       
  6375                             tmp64 = gen_mulu_i64_i32(tmp, tmp2);
       
  6376                         if (insn & (1 << 21)) /* mult accumulate */
       
  6377                             gen_addq(s, tmp64, rn, rd);
       
  6378                         if (!(insn & (1 << 23))) { /* double accumulate */
       
  6379                             ARCH(6);
       
  6380                             gen_addq_lo(s, tmp64, rn);
       
  6381                             gen_addq_lo(s, tmp64, rd);
       
  6382                         }
       
  6383                         if (insn & (1 << 20))
       
  6384                             gen_logicq_cc(tmp64);
       
  6385                         gen_storeq_reg(s, rn, rd, tmp64);
       
  6386                         break;
       
  6387                     }
       
  6388                 } else {
       
  6389                     rn = (insn >> 16) & 0xf;
       
  6390                     rd = (insn >> 12) & 0xf;
       
  6391                     if (insn & (1 << 23)) {
       
  6392                         /* load/store exclusive */
       
  6393                         op1 = (insn >> 21) & 0x3;
       
  6394                         if (op1)
       
  6395                             ARCH(6K);
       
  6396                         else
       
  6397                             ARCH(6);
       
  6398                         gen_movl_T1_reg(s, rn);
       
  6399                         addr = cpu_T[1];
       
  6400                         if (insn & (1 << 20)) {
       
  6401                             gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
       
  6402                             switch (op1) {
       
  6403                             case 0: /* ldrex */
       
  6404                                 tmp = gen_ld32(addr, IS_USER(s));
       
  6405                                 break;
       
  6406                             case 1: /* ldrexd */
       
  6407                                 tmp = gen_ld32(addr, IS_USER(s));
       
  6408                                 store_reg(s, rd, tmp);
       
  6409                                 tcg_gen_addi_i32(addr, addr, 4);
       
  6410                                 tmp = gen_ld32(addr, IS_USER(s));
       
  6411                                 rd++;
       
  6412                                 break;
       
  6413                             case 2: /* ldrexb */
       
  6414                                 tmp = gen_ld8u(addr, IS_USER(s));
       
  6415                                 break;
       
  6416                             case 3: /* ldrexh */
       
  6417                                 tmp = gen_ld16u(addr, IS_USER(s));
       
  6418                                 break;
       
  6419                             default:
       
  6420                                 abort();
       
  6421                             }
       
  6422                             store_reg(s, rd, tmp);
       
  6423                         } else {
       
  6424                             int label = gen_new_label();
       
  6425                             rm = insn & 0xf;
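                                    /* test_exclusive leaves 0 in T0 if the
                                       exclusive monitor still covers addr;
                                       otherwise the store below is skipped and
                                       rd receives the nonzero status value. */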
       
  6426                             gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
       
  6427                             tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
       
  6428                                                 0, label);
       
  6429                             tmp = load_reg(s,rm);
       
  6430                             switch (op1) {
       
  6431                             case 0:  /*  strex */
       
  6432                                 gen_st32(tmp, addr, IS_USER(s));
       
  6433                                 break;
       
  6434                             case 1: /*  strexd */
       
  6435                                 gen_st32(tmp, addr, IS_USER(s));
       
  6436                                 tcg_gen_addi_i32(addr, addr, 4);
       
  6437                                 tmp = load_reg(s, rm + 1);
       
  6438                                 gen_st32(tmp, addr, IS_USER(s));
       
  6439                                 break;
       
  6440                             case 2: /*  strexb */
       
  6441                                 gen_st8(tmp, addr, IS_USER(s));
       
  6442                                 break;
       
  6443                             case 3: /* strexh */
       
  6444                                 gen_st16(tmp, addr, IS_USER(s));
       
  6445                                 break;
       
  6446                             default:
       
  6447                                 abort();
       
  6448                             }
       
  6449                             gen_set_label(label);
       
  6450                             gen_movl_reg_T0(s, rd);
       
  6451                         }
       
  6452                     } else {
       
  6453                         /* SWP instruction */
       
  6454                         rm = (insn) & 0xf;
       
  6455 
       
   6456                         /* ??? This is not really atomic.  However, we know
       
  6457                            we never have multiple CPUs running in parallel,
       
  6458                            so it is good enough.  */
       
  6459                         addr = load_reg(s, rn);
       
  6460                         tmp = load_reg(s, rm);
       
  6461                         if (insn & (1 << 22)) {
       
  6462                             tmp2 = gen_ld8u(addr, IS_USER(s));
       
  6463                             gen_st8(tmp, addr, IS_USER(s));
       
  6464                         } else {
       
  6465                             tmp2 = gen_ld32(addr, IS_USER(s));
       
  6466                             gen_st32(tmp, addr, IS_USER(s));
       
  6467                         }
       
  6468                         dead_tmp(addr);
       
  6469                         store_reg(s, rd, tmp2);
       
  6470                     }
       
  6471                 }
       
  6472             } else {
       
  6473                 int address_offset;
       
  6474                 int load;
       
  6475                 /* Misc load/store */
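                        /* sh (insn bits 6:5): with the L bit set 1=LDRH,
                           2=LDRSB, 3=LDRSH; with L clear 1=STRH, 2=LDRD,
                           3=STRD. */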
       
  6476                 rn = (insn >> 16) & 0xf;
       
  6477                 rd = (insn >> 12) & 0xf;
       
  6478                 addr = load_reg(s, rn);
       
  6479                 if (insn & (1 << 24))
       
  6480                     gen_add_datah_offset(s, insn, 0, addr);
       
  6481                 address_offset = 0;
       
  6482                 if (insn & (1 << 20)) {
       
  6483                     /* load */
       
  6484                     switch(sh) {
       
  6485                     case 1:
       
  6486                         tmp = gen_ld16u(addr, IS_USER(s));
       
  6487                         break;
       
  6488                     case 2:
       
  6489                         tmp = gen_ld8s(addr, IS_USER(s));
       
  6490                         break;
       
  6491                     default:
       
  6492                     case 3:
       
  6493                         tmp = gen_ld16s(addr, IS_USER(s));
       
  6494                         break;
       
  6495                     }
       
  6496                     load = 1;
       
  6497                 } else if (sh & 2) {
       
  6498                     /* doubleword */
       
  6499                     if (sh & 1) {
       
  6500                         /* store */
       
  6501                         tmp = load_reg(s, rd);
       
  6502                         gen_st32(tmp, addr, IS_USER(s));
       
  6503                         tcg_gen_addi_i32(addr, addr, 4);
       
  6504                         tmp = load_reg(s, rd + 1);
       
  6505                         gen_st32(tmp, addr, IS_USER(s));
       
  6506                         load = 0;
       
  6507                     } else {
       
  6508                         /* load */
       
  6509                         tmp = gen_ld32(addr, IS_USER(s));
       
  6510                         store_reg(s, rd, tmp);
       
  6511                         tcg_gen_addi_i32(addr, addr, 4);
       
  6512                         tmp = gen_ld32(addr, IS_USER(s));
       
  6513                         rd++;
       
  6514                         load = 1;
       
  6515                     }
       
  6516                     address_offset = -4;
       
  6517                 } else {
       
  6518                     /* store */
       
  6519                     tmp = load_reg(s, rd);
       
  6520                     gen_st16(tmp, addr, IS_USER(s));
       
  6521                     load = 0;
       
  6522                 }
       
  6523                 /* Perform base writeback before the loaded value to
       
  6524                    ensure correct behavior with overlapping index registers.
       
   6525                    ldrd with base writeback is undefined if the
       
  6526                    destination and index registers overlap.  */
       
  6527                 if (!(insn & (1 << 24))) {
       
  6528                     gen_add_datah_offset(s, insn, address_offset, addr);
       
  6529                     store_reg(s, rn, addr);
       
  6530                 } else if (insn & (1 << 21)) {
       
  6531                     if (address_offset)
       
  6532                         tcg_gen_addi_i32(addr, addr, address_offset);
       
  6533                     store_reg(s, rn, addr);
       
  6534                 } else {
       
  6535                     dead_tmp(addr);
       
  6536                 }
       
  6537                 if (load) {
       
  6538                     /* Complete the load.  */
       
  6539                     store_reg(s, rd, tmp);
       
  6540                 }
       
  6541             }
       
  6542             break;
       
  6543         case 0x4:
       
  6544         case 0x5:
       
  6545             goto do_ldst;
       
  6546         case 0x6:
       
  6547         case 0x7:
       
  6548             if (insn & (1 << 4)) {
       
  6549                 ARCH(6);
       
   6550                 /* ARMv6 media instructions.  */
       
  6551                 rm = insn & 0xf;
       
  6552                 rn = (insn >> 16) & 0xf;
       
  6553                 rd = (insn >> 12) & 0xf;
       
  6554                 rs = (insn >> 8) & 0xf;
       
  6555                 switch ((insn >> 23) & 3) {
       
  6556                 case 0: /* Parallel add/subtract.  */
       
  6557                     op1 = (insn >> 20) & 7;
       
  6558                     tmp = load_reg(s, rn);
       
  6559                     tmp2 = load_reg(s, rm);
       
  6560                     sh = (insn >> 5) & 7;
       
  6561                     if ((op1 & 3) == 0 || sh == 5 || sh == 6)
       
  6562                         goto illegal_op;
       
  6563                     gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
       
  6564                     dead_tmp(tmp2);
       
  6565                     store_reg(s, rd, tmp);
       
  6566                     break;
       
  6567                 case 1:
       
  6568                     if ((insn & 0x00700020) == 0) {
       
  6569                         /* Halfword pack.  */
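                                /* PKHBT: low half from Rn, high half from
                                   Rm LSL shift; PKHTB: high half from Rn,
                                   low half from Rm ASR shift. */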
       
  6570                         tmp = load_reg(s, rn);
       
  6571                         tmp2 = load_reg(s, rm);
       
  6572                         shift = (insn >> 7) & 0x1f;
       
  6573                         if (insn & (1 << 6)) {
       
  6574                             /* pkhtb */
       
  6575                             if (shift == 0)
       
  6576                                 shift = 31;
       
  6577                             tcg_gen_sari_i32(tmp2, tmp2, shift);
       
  6578                             tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
       
  6579                             tcg_gen_ext16u_i32(tmp2, tmp2);
       
  6580                         } else {
       
  6581                             /* pkhbt */
       
  6582                             if (shift)
       
  6583                                 tcg_gen_shli_i32(tmp2, tmp2, shift);
       
  6584                             tcg_gen_ext16u_i32(tmp, tmp);
       
  6585                             tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
       
  6586                         }
       
  6587                         tcg_gen_or_i32(tmp, tmp, tmp2);
       
  6588                         dead_tmp(tmp2);
       
  6589                         store_reg(s, rd, tmp);
       
  6590                     } else if ((insn & 0x00200020) == 0x00200000) {
       
  6591                         /* [us]sat */
       
  6592                         tmp = load_reg(s, rm);
       
  6593                         shift = (insn >> 7) & 0x1f;
       
  6594                         if (insn & (1 << 6)) {
       
  6595                             if (shift == 0)
       
  6596                                 shift = 31;
       
  6597                             tcg_gen_sari_i32(tmp, tmp, shift);
       
  6598                         } else {
       
  6599                             tcg_gen_shli_i32(tmp, tmp, shift);
       
  6600                         }
       
  6601                         sh = (insn >> 16) & 0x1f;
       
  6602                         if (sh != 0) {
       
  6603                             if (insn & (1 << 22))
       
  6604                                 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
       
  6605                             else
       
  6606                                 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
       
  6607                         }
       
  6608                         store_reg(s, rd, tmp);
       
  6609                     } else if ((insn & 0x00300fe0) == 0x00200f20) {
       
  6610                         /* [us]sat16 */
       
  6611                         tmp = load_reg(s, rm);
       
  6612                         sh = (insn >> 16) & 0x1f;
       
  6613                         if (sh != 0) {
       
  6614                             if (insn & (1 << 22))
       
  6615                                 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
       
  6616                             else
       
  6617                                 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
       
  6618                         }
       
  6619                         store_reg(s, rd, tmp);
       
  6620                     } else if ((insn & 0x00700fe0) == 0x00000fa0) {
       
  6621                         /* Select bytes.  */
       
  6622                         tmp = load_reg(s, rn);
       
  6623                         tmp2 = load_reg(s, rm);
       
  6624                         tmp3 = new_tmp();
       
  6625                         tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
       
  6626                         gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
       
  6627                         dead_tmp(tmp3);
       
  6628                         dead_tmp(tmp2);
       
  6629                         store_reg(s, rd, tmp);
       
  6630                     } else if ((insn & 0x000003e0) == 0x00000060) {
       
  6631                         tmp = load_reg(s, rm);
       
  6632                         shift = (insn >> 10) & 3;
       
   6633                         /* ??? In many cases it's not necessary to do a
       
   6634                            rotate; a shift is sufficient.  */
       
  6635                         if (shift != 0)
       
  6636                             tcg_gen_rori_i32(tmp, tmp, shift * 8);
       
  6637                         op1 = (insn >> 20) & 7;
       
  6638                         switch (op1) {
       
  6639                         case 0: gen_sxtb16(tmp);  break;
       
  6640                         case 2: gen_sxtb(tmp);    break;
       
  6641                         case 3: gen_sxth(tmp);    break;
       
  6642                         case 4: gen_uxtb16(tmp);  break;
       
  6643                         case 6: gen_uxtb(tmp);    break;
       
  6644                         case 7: gen_uxth(tmp);    break;
       
  6645                         default: goto illegal_op;
       
  6646                         }
       
  6647                         if (rn != 15) {
       
  6648                             tmp2 = load_reg(s, rn);
       
  6649                             if ((op1 & 3) == 0) {
       
  6650                                 gen_add16(tmp, tmp2);
       
  6651                             } else {
       
  6652                                 tcg_gen_add_i32(tmp, tmp, tmp2);
       
  6653                                 dead_tmp(tmp2);
       
  6654                             }
       
  6655                         }
       
  6656                         store_reg(s, rd, tmp);
       
  6657                     } else if ((insn & 0x003f0f60) == 0x003f0f20) {
       
  6658                         /* rev */
       
  6659                         tmp = load_reg(s, rm);
       
  6660                         if (insn & (1 << 22)) {
       
  6661                             if (insn & (1 << 7)) {
       
  6662                                 gen_revsh(tmp);
       
  6663                             } else {
       
  6664                                 ARCH(6T2);
       
  6665                                 gen_helper_rbit(tmp, tmp);
       
  6666                             }
       
  6667                         } else {
       
  6668                             if (insn & (1 << 7))
       
  6669                                 gen_rev16(tmp);
       
  6670                             else
       
  6671                                 tcg_gen_bswap_i32(tmp, tmp);
       
  6672                         }
       
  6673                         store_reg(s, rd, tmp);
       
  6674                     } else {
       
  6675                         goto illegal_op;
       
  6676                     }
       
  6677                     break;
       
  6678                 case 2: /* Multiplies (Type 3).  */
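                            /* Bit 20 set: SMMUL/SMMLA/SMMLS (most-significant-
                               word multiplies; bit 5 = round, bit 6 selects the
                               subtracting form).  Bit 20 clear: dual halfword
                               multiplies SMUAD/SMUSD/SMLAD/SMLSD and, with
                               bit 22, the long forms SMLALD/SMLSLD. */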
       
  6679                     tmp = load_reg(s, rm);
       
  6680                     tmp2 = load_reg(s, rs);
       
  6681                     if (insn & (1 << 20)) {
       
  6682                         /* Signed multiply most significant [accumulate].  */
       
  6683                         tmp64 = gen_muls_i64_i32(tmp, tmp2);
       
  6684                         if (insn & (1 << 5))
       
  6685                             tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
       
  6686                         tcg_gen_shri_i64(tmp64, tmp64, 32);
       
  6687                         tmp = new_tmp();
       
  6688                         tcg_gen_trunc_i64_i32(tmp, tmp64);
       
  6689                         if (rd != 15) {
       
  6690                             tmp2 = load_reg(s, rd);
       
  6691                             if (insn & (1 << 6)) {
       
  6692                                 tcg_gen_sub_i32(tmp, tmp, tmp2);
       
  6693                             } else {
       
  6694                                 tcg_gen_add_i32(tmp, tmp, tmp2);
       
  6695                             }
       
  6696                             dead_tmp(tmp2);
       
  6697                         }
       
  6698                         store_reg(s, rn, tmp);
       
  6699                     } else {
       
  6700                         if (insn & (1 << 5))
       
  6701                             gen_swap_half(tmp2);
       
  6702                         gen_smul_dual(tmp, tmp2);
       
  6703                         /* This addition cannot overflow.  */
       
  6704                         if (insn & (1 << 6)) {
       
  6705                             tcg_gen_sub_i32(tmp, tmp, tmp2);
       
  6706                         } else {
       
  6707                             tcg_gen_add_i32(tmp, tmp, tmp2);
       
  6708                         }
       
  6709                         dead_tmp(tmp2);
       
  6710                         if (insn & (1 << 22)) {
       
  6711                             /* smlald, smlsld */
       
  6712                             tmp64 = tcg_temp_new_i64();
       
  6713                             tcg_gen_ext_i32_i64(tmp64, tmp);
       
  6714                             dead_tmp(tmp);
       
  6715                             gen_addq(s, tmp64, rd, rn);
       
  6716                             gen_storeq_reg(s, rd, rn, tmp64);
       
  6717                         } else {
       
  6718                             /* smuad, smusd, smlad, smlsd */
       
  6719                             if (rd != 15)
       
  6720                               {
       
  6721                                 tmp2 = load_reg(s, rd);
       
  6722                                 gen_helper_add_setq(tmp, tmp, tmp2);
       
  6723                                 dead_tmp(tmp2);
       
  6724                               }
       
  6725                             store_reg(s, rn, tmp);
       
  6726                         }
       
  6727                     }
       
  6728                     break;
       
  6729                 case 3:
       
  6730                     op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
       
  6731                     switch (op1) {
       
  6732                     case 0: /* Unsigned sum of absolute differences.  */
       
  6733                         ARCH(6);
       
  6734                         tmp = load_reg(s, rm);
       
  6735                         tmp2 = load_reg(s, rs);
       
  6736                         gen_helper_usad8(tmp, tmp, tmp2);
       
  6737                         dead_tmp(tmp2);
       
  6738                         if (rd != 15) {
       
  6739                             tmp2 = load_reg(s, rd);
       
  6740                             tcg_gen_add_i32(tmp, tmp, tmp2);
       
  6741                             dead_tmp(tmp2);
       
  6742                         }
       
  6743                         store_reg(s, rn, tmp);
       
  6744                         break;
       
  6745                     case 0x20: case 0x24: case 0x28: case 0x2c:
       
  6746                         /* Bitfield insert/clear.  */
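                                /* rm == 15 encodes BFC: the selected field is
                                   cleared rather than taken from a source
                                   register. */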
       
  6747                         ARCH(6T2);
       
  6748                         shift = (insn >> 7) & 0x1f;
       
  6749                         i = (insn >> 16) & 0x1f;
       
  6750                         i = i + 1 - shift;
       
  6751                         if (rm == 15) {
       
  6752                             tmp = new_tmp();
       
  6753                             tcg_gen_movi_i32(tmp, 0);
       
  6754                         } else {
       
  6755                             tmp = load_reg(s, rm);
       
  6756                         }
       
  6757                         if (i != 32) {
       
  6758                             tmp2 = load_reg(s, rd);
       
  6759                             gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
       
  6760                             dead_tmp(tmp2);
       
  6761                         }
       
  6762                         store_reg(s, rd, tmp);
       
  6763                         break;
       
  6764                     case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
       
  6765                     case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
       
  6766                         ARCH(6T2);
       
  6767                         tmp = load_reg(s, rm);
       
  6768                         shift = (insn >> 7) & 0x1f;
       
  6769                         i = ((insn >> 16) & 0x1f) + 1;
       
  6770                         if (shift + i > 32)
       
  6771                             goto illegal_op;
       
  6772                         if (i < 32) {
       
  6773                             if (op1 & 0x20) {
       
  6774                                 gen_ubfx(tmp, shift, (1u << i) - 1);
       
  6775                             } else {
       
  6776                                 gen_sbfx(tmp, shift, i);
       
  6777                             }
       
  6778                         }
       
  6779                         store_reg(s, rd, tmp);
       
  6780                         break;
       
  6781                     default:
       
  6782                         goto illegal_op;
       
  6783                     }
       
  6784                     break;
       
  6785                 }
       
  6786                 break;
       
  6787             }
       
  6788         do_ldst:
       
  6789             /* Check for undefined extension instructions
       
   6790              * per the ARM Bible, i.e.:
       
  6791              * xxxx 0111 1111 xxxx  xxxx xxxx 1111 xxxx
       
  6792              */
       
  6793             sh = (0xf << 20) | (0xf << 4);
       
  6794             if (op1 == 0x7 && ((insn & sh) == sh))
       
  6795             {
       
  6796                 goto illegal_op;
       
  6797             }
       
  6798             /* load/store byte/word */
       
  6799             rn = (insn >> 16) & 0xf;
       
  6800             rd = (insn >> 12) & 0xf;
       
  6801             tmp2 = load_reg(s, rn);
       
  6802             i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
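                    /* Post-indexed with the W bit set (LDRT/STRT/LDRBT/STRBT)
                       forces a user-mode access even from privileged code. */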
       
  6803             if (insn & (1 << 24))
       
  6804                 gen_add_data_offset(s, insn, tmp2);
       
  6805             if (insn & (1 << 20)) {
       
  6806                 /* load */
       
  6807                 if (insn & (1 << 22)) {
       
  6808                     tmp = gen_ld8u(tmp2, i);
       
  6809                 } else {
       
  6810                     tmp = gen_ld32(tmp2, i);
       
  6811                 }
       
  6812             } else {
       
  6813                 /* store */
       
  6814                 tmp = load_reg(s, rd);
       
  6815                 if (insn & (1 << 22))
       
  6816                     gen_st8(tmp, tmp2, i);
       
  6817                 else
       
  6818                     gen_st32(tmp, tmp2, i);
       
  6819             }
       
  6820             if (!(insn & (1 << 24))) {
       
  6821                 gen_add_data_offset(s, insn, tmp2);
       
  6822                 store_reg(s, rn, tmp2);
       
  6823             } else if (insn & (1 << 21)) {
       
  6824                 store_reg(s, rn, tmp2);
       
  6825             } else {
       
  6826                 dead_tmp(tmp2);
       
  6827             }
       
  6828             if (insn & (1 << 20)) {
       
  6829                 /* Complete the load.  */
       
  6830                 if (rd == 15)
       
  6831                     gen_bx(s, tmp);
       
  6832                 else
       
  6833                     store_reg(s, rd, tmp);
       
  6834             }
       
  6835             break;
       
  6836         case 0x08:
       
  6837         case 0x09:
       
  6838             {
       
  6839                 int j, n, user, loaded_base;
       
  6840                 TCGv loaded_var;
       
  6841                 /* load/store multiple words */
       
  6842                 /* XXX: store correct base if write back */
       
  6843                 user = 0;
       
  6844                 if (insn & (1 << 22)) {
       
  6845                     if (IS_USER(s))
       
  6846                         goto illegal_op; /* only usable in supervisor mode */
       
  6847 
       
  6848                     if ((insn & (1 << 15)) == 0)
       
  6849                         user = 1;
       
  6850                 }
       
  6851                 rn = (insn >> 16) & 0xf;
       
  6852                 addr = load_reg(s, rn);
       
  6853 
       
  6854                 /* compute total size */
       
  6855                 loaded_base = 0;
       
  6856                 TCGV_UNUSED(loaded_var);
       
  6857                 n = 0;
       
  6858                 for(i=0;i<16;i++) {
       
  6859                     if (insn & (1 << i))
       
  6860                         n++;
       
  6861                 }
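                        /* Normalize the four addressing modes (IA/IB/DA/DB) to
                           a single ascending transfer loop; the base register
                           is fixed up again at writeback time below. */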
       
  6862                 /* XXX: test invalid n == 0 case ? */
       
  6863                 if (insn & (1 << 23)) {
       
  6864                     if (insn & (1 << 24)) {
       
  6865                         /* pre increment */
       
  6866                         tcg_gen_addi_i32(addr, addr, 4);
       
  6867                     } else {
       
  6868                         /* post increment */
       
  6869                     }
       
  6870                 } else {
       
  6871                     if (insn & (1 << 24)) {
       
  6872                         /* pre decrement */
       
  6873                         tcg_gen_addi_i32(addr, addr, -(n * 4));
       
  6874                     } else {
       
  6875                         /* post decrement */
       
   6876                         if (n != 1)
       
   6877                             tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
       
  6878                     }
       
  6879                 }
       
  6880                 j = 0;
       
  6881                 for(i=0;i<16;i++) {
       
  6882                     if (insn & (1 << i)) {
       
  6883                         if (insn & (1 << 20)) {
       
  6884                             /* load */
       
  6885                             tmp = gen_ld32(addr, IS_USER(s));
       
  6886                             if (i == 15) {
       
  6887                                 gen_bx(s, tmp);
       
  6888                             } else if (user) {
       
  6889                                 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
       
  6890                                 dead_tmp(tmp);
       
  6891                             } else if (i == rn) {
       
  6892                                 loaded_var = tmp;
       
  6893                                 loaded_base = 1;
       
  6894                             } else {
       
  6895                                 store_reg(s, i, tmp);
       
  6896                             }
       
  6897                         } else {
       
  6898                             /* store */
       
  6899                             if (i == 15) {
       
  6900                                 /* special case: r15 = PC + 8 */
       
  6901                                 val = (long)s->pc + 4;
       
  6902                                 tmp = new_tmp();
       
  6903                                 tcg_gen_movi_i32(tmp, val);
       
  6904                             } else if (user) {
       
  6905                                 tmp = new_tmp();
       
  6906                                 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
       
  6907                             } else {
       
  6908                                 tmp = load_reg(s, i);
       
  6909                             }
       
  6910                             gen_st32(tmp, addr, IS_USER(s));
       
  6911                         }
       
  6912                         j++;
       
  6913                         /* no need to add after the last transfer */
       
  6914                         if (j != n)
       
  6915                             tcg_gen_addi_i32(addr, addr, 4);
       
  6916                     }
       
  6917                 }
       
  6918                 if (insn & (1 << 21)) {
       
  6919                     /* write back */
       
  6920                     if (insn & (1 << 23)) {
       
  6921                         if (insn & (1 << 24)) {
       
  6922                             /* pre increment */
       
  6923                         } else {
       
  6924                             /* post increment */
       
  6925                             tcg_gen_addi_i32(addr, addr, 4);
       
  6926                         }
       
  6927                     } else {
       
  6928                         if (insn & (1 << 24)) {
       
  6929                             /* pre decrement */
       
  6930                             if (n != 1)
       
  6931                                 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
       
  6932                         } else {
       
  6933                             /* post decrement */
       
  6934                             tcg_gen_addi_i32(addr, addr, -(n * 4));
       
  6935                         }
       
  6936                     }
       
  6937                     store_reg(s, rn, addr);
       
  6938                 } else {
       
  6939                     dead_tmp(addr);
       
  6940                 }
       
  6941                 if (loaded_base) {
       
  6942                     store_reg(s, rn, loaded_var);
       
  6943                 }
       
  6944                 if ((insn & (1 << 22)) && !user) {
       
  6945                     /* Restore CPSR from SPSR.  */
       
  6946                     tmp = load_cpu_field(spsr);
       
  6947                     gen_set_cpsr(tmp, 0xffffffff);
       
  6948                     dead_tmp(tmp);
       
  6949                     s->is_jmp = DISAS_UPDATE;
       
  6950                 }
       
  6951             }
       
  6952             break;
       
  6953         case 0xa:
       
  6954         case 0xb:
       
  6955             {
       
  6956                 int32_t offset;
       
  6957 
       
  6958                 /* branch (and link) */
       
  6959                 val = (int32_t)s->pc;
       
  6960                 if (insn & (1 << 24)) {
       
  6961                     tmp = new_tmp();
       
  6962                     tcg_gen_movi_i32(tmp, val);
       
  6963                     store_reg(s, 14, tmp);
       
  6964                 }
       
  6965                 offset = (((int32_t)insn << 8) >> 8);
       
  6966                 val += (offset << 2) + 4;
       
  6967                 gen_jmp(s, val);
       
  6968             }
       
  6969             break;
       
  6970         case 0xc:
       
  6971         case 0xd:
       
  6972         case 0xe:
       
  6973             /* Coprocessor.  */
       
  6974             if (disas_coproc_insn(env, s, insn))
       
  6975                 goto illegal_op;
       
  6976             break;
       
  6977         case 0xf:
       
  6978             /* swi */
       
  6979             gen_set_pc_im(s->pc);
       
  6980             s->is_jmp = DISAS_SWI;
       
  6981             break;
       
  6982         default:
       
  6983         illegal_op:
       
  6984             gen_set_condexec(s);
       
  6985             gen_set_pc_im(s->pc - 4);
       
  6986             gen_exception(EXCP_UDEF);
       
  6987             s->is_jmp = DISAS_JUMP;
       
  6988             break;
       
  6989         }
       
  6990     }
       
  6991 }
       
  6992 
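/* Illustrative sketch only (not referenced by the translator): how the
   24-bit branch immediate in the 0xa/0xb case above is sign-extended and
   scaled to a byte offset.  The helper name is an example, not QEMU API.  */
static inline int32_t example_branch_byte_offset(uint32_t insn)
{
    /* Bits [23:0] hold a signed word offset: shifting left by 8 and then
       arithmetically right by 8 sign-extends it, and << 2 converts words
       to bytes.  The branch target is PC + 8 plus this offset.  */
    return (((int32_t)insn << 8) >> 8) << 2;
}
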
       
  6993 /* Return true if this is a Thumb-2 logical op.  */
       
  6994 static int
       
  6995 thumb2_logic_op(int op)
       
  6996 {
       
  6997     return (op < 8);
       
  6998 }
       
  6999 
       
  7000 /* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
       
  7001    then set condition code flags based on the result of the operation.
       
  7002    If SHIFTER_OUT is nonzero then set the carry flag for logical operations
       
  7003    to the high bit of T1.
       
  7004    Returns zero if the opcode is valid.  */
       
  7005 
       
  7006 static int
       
  7007 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
       
  7008 {
       
  7009     int logic_cc;
       
  7010 
       
  7011     logic_cc = 0;
       
  7012     switch (op) {
       
  7013     case 0: /* and */
       
  7014         gen_op_andl_T0_T1();
       
  7015         logic_cc = conds;
       
  7016         break;
       
  7017     case 1: /* bic */
       
  7018         gen_op_bicl_T0_T1();
       
  7019         logic_cc = conds;
       
  7020         break;
       
  7021     case 2: /* orr */
       
  7022         gen_op_orl_T0_T1();
       
  7023         logic_cc = conds;
       
  7024         break;
       
  7025     case 3: /* orn */
       
  7026         gen_op_notl_T1();
       
  7027         gen_op_orl_T0_T1();
       
  7028         logic_cc = conds;
       
  7029         break;
       
  7030     case 4: /* eor */
       
  7031         gen_op_xorl_T0_T1();
       
  7032         logic_cc = conds;
       
  7033         break;
       
  7034     case 8: /* add */
       
  7035         if (conds)
       
  7036             gen_op_addl_T0_T1_cc();
       
  7037         else
       
  7038             gen_op_addl_T0_T1();
       
  7039         break;
       
  7040     case 10: /* adc */
       
  7041         if (conds)
       
  7042             gen_op_adcl_T0_T1_cc();
       
  7043         else
       
  7044             gen_adc_T0_T1();
       
  7045         break;
       
  7046     case 11: /* sbc */
       
  7047         if (conds)
       
  7048             gen_op_sbcl_T0_T1_cc();
       
  7049         else
       
  7050             gen_sbc_T0_T1();
       
  7051         break;
       
  7052     case 13: /* sub */
       
  7053         if (conds)
       
  7054             gen_op_subl_T0_T1_cc();
       
  7055         else
       
  7056             gen_op_subl_T0_T1();
       
  7057         break;
       
  7058     case 14: /* rsb */
       
  7059         if (conds)
       
  7060             gen_op_rsbl_T0_T1_cc();
       
  7061         else
       
  7062             gen_op_rsbl_T0_T1();
       
  7063         break;
       
  7064     default: /* 5, 6, 7, 9, 12, 15. */
       
  7065         return 1;
       
  7066     }
       
  7067     if (logic_cc) {
       
  7068         gen_op_logic_T0_cc();
       
  7069         if (shifter_out)
       
  7070             gen_set_CF_bit31(cpu_T[1]);
       
  7071     }
       
  7072     return 0;
       
  7073 }
       
  7074 
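/* Minimal usage sketch (illustrative only, mirroring the register-shift
   case in disas_thumb2_insn below; not compiled as-is):

       gen_movl_T0_reg(s, rn);            -- T0 = first operand
       gen_movl_T1_reg(s, rm);            -- T1 = second operand (pre-shift)
       gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
       if (gen_thumb2_data_op(s, op, conds, 0))
           goto illegal_op;
       gen_movl_reg_T0(s, rd);            -- write back the result
 */
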
       
  7075 /* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
       
  7076    is not legal.  */
       
  7077 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
       
  7078 {
       
  7079     uint32_t insn, imm, shift, offset;
       
  7080     uint32_t rd, rn, rm, rs;
       
  7081     TCGv tmp;
       
  7082     TCGv tmp2;
       
  7083     TCGv tmp3;
       
  7084     TCGv addr;
       
  7085     TCGv_i64 tmp64;
       
  7086     int op;
       
  7087     int shiftop;
       
  7088     int conds;
       
  7089     int logic_cc;
       
  7090 
       
  7091     if (!(arm_feature(env, ARM_FEATURE_THUMB2)
       
  7092           || arm_feature (env, ARM_FEATURE_M))) {
       
  7093         /* Thumb-1 cores may need to treat bl and blx as a pair of
       
  7094            16-bit instructions to get correct prefetch abort behavior.  */
       
  7095         insn = insn_hw1;
       
  7096         if ((insn & (1 << 12)) == 0) {
       
  7097             /* Second half of blx.  */
       
  7098             offset = ((insn & 0x7ff) << 1);
       
  7099             tmp = load_reg(s, 14);
       
  7100             tcg_gen_addi_i32(tmp, tmp, offset);
       
  7101             tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
       
  7102 
       
  7103             tmp2 = new_tmp();
       
  7104             tcg_gen_movi_i32(tmp2, s->pc | 1);
       
  7105             store_reg(s, 14, tmp2);
       
  7106             gen_bx(s, tmp);
       
  7107             return 0;
       
  7108         }
       
  7109         if (insn & (1 << 11)) {
       
  7110             /* Second half of bl.  */
       
  7111             offset = ((insn & 0x7ff) << 1) | 1;
       
  7112             tmp = load_reg(s, 14);
       
  7113             tcg_gen_addi_i32(tmp, tmp, offset);
       
  7114 
       
  7115             tmp2 = new_tmp();
       
  7116             tcg_gen_movi_i32(tmp2, s->pc | 1);
       
  7117             store_reg(s, 14, tmp2);
       
  7118             gen_bx(s, tmp);
       
  7119             return 0;
       
  7120         }
       
  7121         if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
       
  7122             /* Instruction spans a page boundary.  Implement it as two
       
   7123                16-bit instructions in case the second half causes a
       
  7124                prefetch abort.  */
       
  7125             offset = ((int32_t)insn << 21) >> 9;
       
  7126             gen_op_movl_T0_im(s->pc + 2 + offset);
       
  7127             gen_movl_reg_T0(s, 14);
       
  7128             return 0;
       
  7129         }
       
  7130         /* Fall through to 32-bit decode.  */
       
  7131     }
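    /* Illustrative note: the page-boundary path above shows the first
       half's effect in isolation: r14 <- address of the pair + 4 plus the
       sign-extended hw1[10:0] << 12.  The second-half paths then add
       hw2[10:0] << 1 (forcing word alignment for blx), branch there, and
       leave the return address with the Thumb bit set back in r14.  */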
       
  7132 
       
  7133     insn = lduw_code(s->pc);
       
  7134     if (env->bswap_code)
       
  7135         insn = bswap16(insn);
       
  7136     s->pc += 2;
       
  7137     insn |= (uint32_t)insn_hw1 << 16;
       
  7138 
       
  7139     if ((insn & 0xf800e800) != 0xf000e800) {
       
  7140         ARCH(6T2);
       
  7141     }
       
  7142 
       
  7143     rn = (insn >> 16) & 0xf;
       
  7144     rs = (insn >> 12) & 0xf;
       
  7145     rd = (insn >> 8) & 0xf;
       
  7146     rm = insn & 0xf;
       
  7147     switch ((insn >> 25) & 0xf) {
       
  7148     case 0: case 1: case 2: case 3:
       
  7149         /* 16-bit instructions.  Should never happen.  */
       
  7150         abort();
       
  7151     case 4:
       
  7152         if (insn & (1 << 22)) {
       
  7153             /* Other load/store, table branch.  */
       
  7154             if (insn & 0x01200000) {
       
  7155                 /* Load/store doubleword.  */
       
  7156                 if (rn == 15) {
       
  7157                     addr = new_tmp();
       
  7158                     tcg_gen_movi_i32(addr, s->pc & ~3);
       
  7159                 } else {
       
  7160                     addr = load_reg(s, rn);
       
  7161                 }
       
  7162                 offset = (insn & 0xff) * 4;
       
  7163                 if ((insn & (1 << 23)) == 0)
       
  7164                     offset = -offset;
       
  7165                 if (insn & (1 << 24)) {
       
  7166                     tcg_gen_addi_i32(addr, addr, offset);
       
  7167                     offset = 0;
       
  7168                 }
       
  7169                 if (insn & (1 << 20)) {
       
  7170                     /* ldrd */
       
  7171                     tmp = gen_ld32(addr, IS_USER(s));
       
  7172                     store_reg(s, rs, tmp);
       
  7173                     tcg_gen_addi_i32(addr, addr, 4);
       
  7174                     tmp = gen_ld32(addr, IS_USER(s));
       
  7175                     store_reg(s, rd, tmp);
       
  7176                 } else {
       
  7177                     /* strd */
       
  7178                     tmp = load_reg(s, rs);
       
  7179                     gen_st32(tmp, addr, IS_USER(s));
       
  7180                     tcg_gen_addi_i32(addr, addr, 4);
       
  7181                     tmp = load_reg(s, rd);
       
  7182                     gen_st32(tmp, addr, IS_USER(s));
       
  7183                 }
       
  7184                 if (insn & (1 << 21)) {
       
  7185                     /* Base writeback.  */
       
  7186                     if (rn == 15)
       
  7187                         goto illegal_op;
       
  7188                     tcg_gen_addi_i32(addr, addr, offset - 4);
       
  7189                     store_reg(s, rn, addr);
       
  7190                 } else {
       
  7191                     dead_tmp(addr);
       
  7192                 }
       
  7193             } else if ((insn & (1 << 23)) == 0) {
       
  7194                 /* Load/store exclusive word.  */
       
  7195                 gen_movl_T1_reg(s, rn);
       
  7196                 addr = cpu_T[1];
       
  7197                 if (insn & (1 << 20)) {
       
  7198                     gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
       
  7199                     tmp = gen_ld32(addr, IS_USER(s));
       
  7200                     store_reg(s, rd, tmp);
       
  7201                 } else {
       
  7202                     int label = gen_new_label();
       
  7203                     gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
       
  7204                     tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
       
  7205                                         0, label);
       
  7206                     tmp = load_reg(s, rs);
       
  7207                     gen_st32(tmp, cpu_T[1], IS_USER(s));
       
  7208                     gen_set_label(label);
       
  7209                     gen_movl_reg_T0(s, rd);
       
  7210                 }
       
  7211             } else if ((insn & (1 << 6)) == 0) {
       
  7212                 /* Table Branch.  */
       
  7213                 if (rn == 15) {
       
  7214                     addr = new_tmp();
       
  7215                     tcg_gen_movi_i32(addr, s->pc);
       
  7216                 } else {
       
  7217                     addr = load_reg(s, rn);
       
  7218                 }
       
  7219                 tmp = load_reg(s, rm);
       
  7220                 tcg_gen_add_i32(addr, addr, tmp);
       
  7221                 if (insn & (1 << 4)) {
       
  7222                     /* tbh */
       
  7223                     tcg_gen_add_i32(addr, addr, tmp);
       
  7224                     dead_tmp(tmp);
       
  7225                     tmp = gen_ld16u(addr, IS_USER(s));
       
  7226                 } else { /* tbb */
       
  7227                     dead_tmp(tmp);
       
  7228                     tmp = gen_ld8u(addr, IS_USER(s));
       
  7229                 }
       
  7230                 dead_tmp(addr);
       
  7231                 tcg_gen_shli_i32(tmp, tmp, 1);
       
  7232                 tcg_gen_addi_i32(tmp, tmp, s->pc);
       
  7233                 store_reg(s, 15, tmp);
       
  7234             } else {
       
  7235                 /* Load/store exclusive byte/halfword/doubleword.  */
       
  7236                 /* ??? These are not really atomic.  However we know
       
  7237                    we never have multiple CPUs running in parallel,
       
  7238                    so it is good enough.  */
       
  7239                 op = (insn >> 4) & 0x3;
       
  7240                 /* Must use a global reg for the address because we have
       
  7241                    a conditional branch in the store instruction.  */
       
  7242                 gen_movl_T1_reg(s, rn);
       
  7243                 addr = cpu_T[1];
       
  7244                 if (insn & (1 << 20)) {
       
  7245                     gen_helper_mark_exclusive(cpu_env, addr);
       
  7246                     switch (op) {
       
  7247                     case 0:
       
  7248                         tmp = gen_ld8u(addr, IS_USER(s));
       
  7249                         break;
       
  7250                     case 1:
       
  7251                         tmp = gen_ld16u(addr, IS_USER(s));
       
  7252                         break;
       
  7253                     case 3:
       
  7254                         tmp = gen_ld32(addr, IS_USER(s));
       
  7255                         tcg_gen_addi_i32(addr, addr, 4);
       
  7256                         tmp2 = gen_ld32(addr, IS_USER(s));
       
  7257                         store_reg(s, rd, tmp2);
       
  7258                         break;
       
  7259                     default:
       
  7260                         goto illegal_op;
       
  7261                     }
       
  7262                     store_reg(s, rs, tmp);
       
  7263                 } else {
       
  7264                     int label = gen_new_label();
       
  7265                     /* Must use a global that is not killed by the branch.  */
       
  7266                     gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
       
  7267                     tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
       
  7268                     tmp = load_reg(s, rs);
       
  7269                     switch (op) {
       
  7270                     case 0:
       
  7271                         gen_st8(tmp, addr, IS_USER(s));
       
  7272                         break;
       
  7273                     case 1:
       
  7274                         gen_st16(tmp, addr, IS_USER(s));
       
  7275                         break;
       
  7276                     case 3:
       
  7277                         gen_st32(tmp, addr, IS_USER(s));
       
  7278                         tcg_gen_addi_i32(addr, addr, 4);
       
  7279                         tmp = load_reg(s, rd);
       
  7280                         gen_st32(tmp, addr, IS_USER(s));
       
  7281                         break;
       
  7282                     default:
       
  7283                         goto illegal_op;
       
  7284                     }
       
  7285                     gen_set_label(label);
       
  7286                     gen_movl_reg_T0(s, rm);
       
  7287                 }
       
  7288             }
       
  7289         } else {
       
  7290             /* Load/store multiple, RFE, SRS.  */
       
  7291             if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
       
  7292                 /* Not available in user mode.  */
       
  7293                 if (IS_USER(s))
       
  7294                     goto illegal_op;
       
  7295                 if (insn & (1 << 20)) {
       
  7296                     /* rfe */
       
  7297                     addr = load_reg(s, rn);
       
  7298                     if ((insn & (1 << 24)) == 0)
       
  7299                         tcg_gen_addi_i32(addr, addr, -8);
       
  7300                     /* Load PC into tmp and CPSR into tmp2.  */
       
  7301                     tmp = gen_ld32(addr, 0);
       
  7302                     tcg_gen_addi_i32(addr, addr, 4);
       
  7303                     tmp2 = gen_ld32(addr, 0);
       
  7304                     if (insn & (1 << 21)) {
       
  7305                         /* Base writeback.  */
       
  7306                         if (insn & (1 << 24)) {
       
  7307                             tcg_gen_addi_i32(addr, addr, 4);
       
  7308                         } else {
       
  7309                             tcg_gen_addi_i32(addr, addr, -4);
       
  7310                         }
       
  7311                         store_reg(s, rn, addr);
       
  7312                     } else {
       
  7313                         dead_tmp(addr);
       
  7314                     }
       
  7315                     gen_rfe(s, tmp, tmp2);
       
  7316                 } else {
       
  7317                     /* srs */
       
  7318                     op = (insn & 0x1f);
       
  7319                     if (op == (env->uncached_cpsr & CPSR_M)) {
       
  7320                         addr = load_reg(s, 13);
       
  7321                     } else {
       
  7322                         addr = new_tmp();
       
  7323                         gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
       
  7324                     }
       
  7325                     if ((insn & (1 << 24)) == 0) {
       
  7326                         tcg_gen_addi_i32(addr, addr, -8);
       
  7327                     }
       
  7328                     tmp = load_reg(s, 14);
       
  7329                     gen_st32(tmp, addr, 0);
       
  7330                     tcg_gen_addi_i32(addr, addr, 4);
       
  7331                     tmp = new_tmp();
       
  7332                     gen_helper_cpsr_read(tmp);
       
  7333                     gen_st32(tmp, addr, 0);
       
  7334                     if (insn & (1 << 21)) {
       
  7335                         if ((insn & (1 << 24)) == 0) {
       
  7336                             tcg_gen_addi_i32(addr, addr, -4);
       
  7337                         } else {
       
  7338                             tcg_gen_addi_i32(addr, addr, 4);
       
  7339                         }
       
  7340                         if (op == (env->uncached_cpsr & CPSR_M)) {
       
  7341                             store_reg(s, 13, addr);
       
  7342                         } else {
       
  7343                             gen_helper_set_r13_banked(cpu_env,
       
  7344                                 tcg_const_i32(op), addr);
       
  7345                         }
       
  7346                     } else {
       
  7347                         dead_tmp(addr);
       
  7348                     }
       
  7349                 }
       
  7350             } else {
       
  7351                 int i;
       
  7352                 /* Load/store multiple.  */
       
  7353                 addr = load_reg(s, rn);
       
  7354                 offset = 0;
       
  7355                 for (i = 0; i < 16; i++) {
       
  7356                     if (insn & (1 << i))
       
  7357                         offset += 4;
       
  7358                 }
       
  7359                 if (insn & (1 << 24)) {
       
  7360                     tcg_gen_addi_i32(addr, addr, -offset);
       
  7361                 }
       
  7362 
       
  7363                 for (i = 0; i < 16; i++) {
       
  7364                     if ((insn & (1 << i)) == 0)
       
  7365                         continue;
       
  7366                     if (insn & (1 << 20)) {
       
  7367                         /* Load.  */
       
  7368                         tmp = gen_ld32(addr, IS_USER(s));
       
  7369                         if (i == 15) {
       
  7370                             gen_bx(s, tmp);
       
  7371                         } else {
       
  7372                             store_reg(s, i, tmp);
       
  7373                         }
       
  7374                     } else {
       
  7375                         /* Store.  */
       
  7376                         tmp = load_reg(s, i);
       
  7377                         gen_st32(tmp, addr, IS_USER(s));
       
  7378                     }
       
  7379                     tcg_gen_addi_i32(addr, addr, 4);
       
  7380                 }
       
  7381                 if (insn & (1 << 21)) {
       
  7382                     /* Base register writeback.  */
       
  7383                     if (insn & (1 << 24)) {
       
  7384                         tcg_gen_addi_i32(addr, addr, -offset);
       
  7385                     }
       
  7386                     /* Fault if writeback register is in register list.  */
       
  7387                     if (insn & (1 << rn))
       
  7388                         goto illegal_op;
       
  7389                     store_reg(s, rn, addr);
       
  7390                 } else {
       
  7391                     dead_tmp(addr);
       
  7392                 }
       
  7393             }
       
  7394         }
       
  7395         break;
       
  7396     case 5: /* Data processing register constant shift.  */
       
  7397         if (rn == 15)
       
  7398             gen_op_movl_T0_im(0);
       
  7399         else
       
  7400             gen_movl_T0_reg(s, rn);
       
  7401         gen_movl_T1_reg(s, rm);
       
  7402         op = (insn >> 21) & 0xf;
       
  7403         shiftop = (insn >> 4) & 3;
       
  7404         shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
       
  7405         conds = (insn & (1 << 20)) != 0;
       
  7406         logic_cc = (conds && thumb2_logic_op(op));
       
  7407         gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
       
  7408         if (gen_thumb2_data_op(s, op, conds, 0))
       
  7409             goto illegal_op;
       
  7410         if (rd != 15)
       
  7411             gen_movl_reg_T0(s, rd);
       
  7412         break;
       
  7413     case 13: /* Misc data processing.  */
       
  7414         op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
       
  7415         if (op < 4 && (insn & 0xf000) != 0xf000)
       
  7416             goto illegal_op;
       
  7417         switch (op) {
       
  7418         case 0: /* Register controlled shift.  */
       
  7419             tmp = load_reg(s, rn);
       
  7420             tmp2 = load_reg(s, rm);
       
  7421             if ((insn & 0x70) != 0)
       
  7422                 goto illegal_op;
       
  7423             op = (insn >> 21) & 3;
       
  7424             logic_cc = (insn & (1 << 20)) != 0;
       
  7425             gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
       
  7426             if (logic_cc)
       
  7427                 gen_logic_CC(tmp);
       
  7428             store_reg(s, rd, tmp);
       
  7429             break;
       
  7430         case 1: /* Sign/zero extend.  */
       
  7431             tmp = load_reg(s, rm);
       
  7432             shift = (insn >> 4) & 3;
       
   7433             /* ??? In many cases it's not necessary to do a
       
  7434                rotate, a shift is sufficient.  */
       
  7435             if (shift != 0)
       
  7436                 tcg_gen_rori_i32(tmp, tmp, shift * 8);
       
  7437             op = (insn >> 20) & 7;
       
  7438             switch (op) {
       
  7439             case 0: gen_sxth(tmp);   break;
       
  7440             case 1: gen_uxth(tmp);   break;
       
  7441             case 2: gen_sxtb16(tmp); break;
       
  7442             case 3: gen_uxtb16(tmp); break;
       
  7443             case 4: gen_sxtb(tmp);   break;
       
  7444             case 5: gen_uxtb(tmp);   break;
       
  7445             default: goto illegal_op;
       
  7446             }
       
  7447             if (rn != 15) {
       
  7448                 tmp2 = load_reg(s, rn);
       
  7449                 if ((op >> 1) == 1) {
       
  7450                     gen_add16(tmp, tmp2);
       
  7451                 } else {
       
  7452                     tcg_gen_add_i32(tmp, tmp, tmp2);
       
  7453                     dead_tmp(tmp2);
       
  7454                 }
       
  7455             }
       
  7456             store_reg(s, rd, tmp);
       
  7457             break;
       
  7458         case 2: /* SIMD add/subtract.  */
       
  7459             op = (insn >> 20) & 7;
       
  7460             shift = (insn >> 4) & 7;
       
  7461             if ((op & 3) == 3 || (shift & 3) == 3)
       
  7462                 goto illegal_op;
       
  7463             tmp = load_reg(s, rn);
       
  7464             tmp2 = load_reg(s, rm);
       
  7465             gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
       
  7466             dead_tmp(tmp2);
       
  7467             store_reg(s, rd, tmp);
       
  7468             break;
       
  7469         case 3: /* Other data processing.  */
       
  7470             op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
       
  7471             if (op < 4) {
       
  7472                 /* Saturating add/subtract.  */
       
  7473                 tmp = load_reg(s, rn);
       
  7474                 tmp2 = load_reg(s, rm);
       
  7475                 if (op & 2)
       
  7476                     gen_helper_double_saturate(tmp, tmp);
       
  7477                 if (op & 1)
       
  7478                     gen_helper_sub_saturate(tmp, tmp2, tmp);
       
  7479                 else
       
  7480                     gen_helper_add_saturate(tmp, tmp, tmp2);
       
  7481                 dead_tmp(tmp2);
       
  7482             } else {
       
  7483                 tmp = load_reg(s, rn);
       
  7484                 switch (op) {
       
  7485                 case 0x0a: /* rbit */
       
  7486                     gen_helper_rbit(tmp, tmp);
       
  7487                     break;
       
  7488                 case 0x08: /* rev */
       
  7489                     tcg_gen_bswap_i32(tmp, tmp);
       
  7490                     break;
       
  7491                 case 0x09: /* rev16 */
       
  7492                     gen_rev16(tmp);
       
  7493                     break;
       
  7494                 case 0x0b: /* revsh */
       
  7495                     gen_revsh(tmp);
       
  7496                     break;
       
  7497                 case 0x10: /* sel */
       
  7498                     tmp2 = load_reg(s, rm);
       
  7499                     tmp3 = new_tmp();
       
  7500                     tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
       
  7501                     gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
       
  7502                     dead_tmp(tmp3);
       
  7503                     dead_tmp(tmp2);
       
  7504                     break;
       
  7505                 case 0x18: /* clz */
       
  7506                     gen_helper_clz(tmp, tmp);
       
  7507                     break;
       
  7508                 default:
       
  7509                     goto illegal_op;
       
  7510                 }
       
  7511             }
       
  7512             store_reg(s, rd, tmp);
       
  7513             break;
       
  7514         case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
       
  7515             op = (insn >> 4) & 0xf;
       
  7516             tmp = load_reg(s, rn);
       
  7517             tmp2 = load_reg(s, rm);
       
  7518             switch ((insn >> 20) & 7) {
       
  7519             case 0: /* 32 x 32 -> 32 */
       
  7520                 tcg_gen_mul_i32(tmp, tmp, tmp2);
       
  7521                 dead_tmp(tmp2);
       
  7522                 if (rs != 15) {
       
  7523                     tmp2 = load_reg(s, rs);
       
  7524                     if (op)
       
  7525                         tcg_gen_sub_i32(tmp, tmp2, tmp);
       
  7526                     else
       
  7527                         tcg_gen_add_i32(tmp, tmp, tmp2);
       
  7528                     dead_tmp(tmp2);
       
  7529                 }
       
  7530                 break;
       
  7531             case 1: /* 16 x 16 -> 32 */
       
  7532                 gen_mulxy(tmp, tmp2, op & 2, op & 1);
       
  7533                 dead_tmp(tmp2);
       
  7534                 if (rs != 15) {
       
  7535                     tmp2 = load_reg(s, rs);
       
  7536                     gen_helper_add_setq(tmp, tmp, tmp2);
       
  7537                     dead_tmp(tmp2);
       
  7538                 }
       
  7539                 break;
       
  7540             case 2: /* Dual multiply add.  */
       
  7541             case 4: /* Dual multiply subtract.  */
       
  7542                 if (op)
       
  7543                     gen_swap_half(tmp2);
       
  7544                 gen_smul_dual(tmp, tmp2);
       
  7545                 /* This addition cannot overflow.  */
       
  7546                 if (insn & (1 << 22)) {
       
  7547                     tcg_gen_sub_i32(tmp, tmp, tmp2);
       
  7548                 } else {
       
  7549                     tcg_gen_add_i32(tmp, tmp, tmp2);
       
  7550                 }
       
  7551                 dead_tmp(tmp2);
       
  7552                 if (rs != 15)
       
  7553                   {
       
  7554                     tmp2 = load_reg(s, rs);
       
  7555                     gen_helper_add_setq(tmp, tmp, tmp2);
       
  7556                     dead_tmp(tmp2);
       
  7557                   }
       
  7558                 break;
       
  7559             case 3: /* 32 * 16 -> 32msb */
       
  7560                 if (op)
       
  7561                     tcg_gen_sari_i32(tmp2, tmp2, 16);
       
  7562                 else
       
  7563                     gen_sxth(tmp2);
       
  7564                 tmp64 = gen_muls_i64_i32(tmp, tmp2);
       
  7565                 tcg_gen_shri_i64(tmp64, tmp64, 16);
       
  7566                 tmp = new_tmp();
       
  7567                 tcg_gen_trunc_i64_i32(tmp, tmp64);
       
  7568                 if (rs != 15)
       
  7569                   {
       
  7570                     tmp2 = load_reg(s, rs);
       
  7571                     gen_helper_add_setq(tmp, tmp, tmp2);
       
  7572                     dead_tmp(tmp2);
       
  7573                   }
       
  7574                 break;
       
  7575             case 5: case 6: /* 32 * 32 -> 32msb */
       
  7576                 gen_imull(tmp, tmp2);
       
  7577                 if (insn & (1 << 5)) {
       
  7578                     gen_roundqd(tmp, tmp2);
       
  7579                     dead_tmp(tmp2);
       
  7580                 } else {
       
  7581                     dead_tmp(tmp);
       
  7582                     tmp = tmp2;
       
  7583                 }
       
  7584                 if (rs != 15) {
       
  7585                     tmp2 = load_reg(s, rs);
       
  7586                     if (insn & (1 << 21)) {
       
  7587                         tcg_gen_add_i32(tmp, tmp, tmp2);
       
  7588                     } else {
       
  7589                         tcg_gen_sub_i32(tmp, tmp2, tmp);
       
  7590                     }
       
  7591                     dead_tmp(tmp2);
       
  7592                 }
       
  7593                 break;
       
  7594             case 7: /* Unsigned sum of absolute differences.  */
       
  7595                 gen_helper_usad8(tmp, tmp, tmp2);
       
  7596                 dead_tmp(tmp2);
       
  7597                 if (rs != 15) {
       
  7598                     tmp2 = load_reg(s, rs);
       
  7599                     tcg_gen_add_i32(tmp, tmp, tmp2);
       
  7600                     dead_tmp(tmp2);
       
  7601                 }
       
  7602                 break;
       
  7603             }
       
  7604             store_reg(s, rd, tmp);
       
  7605             break;
       
  7606         case 6: case 7: /* 64-bit multiply, Divide.  */
       
  7607             op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
       
  7608             tmp = load_reg(s, rn);
       
  7609             tmp2 = load_reg(s, rm);
       
  7610             if ((op & 0x50) == 0x10) {
       
  7611                 /* sdiv, udiv */
       
  7612                 if (!arm_feature(env, ARM_FEATURE_DIV))
       
  7613                     goto illegal_op;
       
  7614                 if (op & 0x20)
       
  7615                     gen_helper_udiv(tmp, tmp, tmp2);
       
  7616                 else
       
  7617                     gen_helper_sdiv(tmp, tmp, tmp2);
       
  7618                 dead_tmp(tmp2);
       
  7619                 store_reg(s, rd, tmp);
       
  7620             } else if ((op & 0xe) == 0xc) {
       
  7621                 /* Dual multiply accumulate long.  */
       
  7622                 if (op & 1)
       
  7623                     gen_swap_half(tmp2);
       
  7624                 gen_smul_dual(tmp, tmp2);
       
  7625                 if (op & 0x10) {
       
  7626                     tcg_gen_sub_i32(tmp, tmp, tmp2);
       
  7627                 } else {
       
  7628                     tcg_gen_add_i32(tmp, tmp, tmp2);
       
  7629                 }
       
  7630                 dead_tmp(tmp2);
       
  7631                 /* BUGFIX */
       
  7632                 tmp64 = tcg_temp_new_i64();
       
  7633                 tcg_gen_ext_i32_i64(tmp64, tmp);
       
  7634                 dead_tmp(tmp);
       
  7635                 gen_addq(s, tmp64, rs, rd);
       
  7636                 gen_storeq_reg(s, rs, rd, tmp64);
       
  7637             } else {
       
  7638                 if (op & 0x20) {
       
  7639                     /* Unsigned 64-bit multiply  */
       
  7640                     tmp64 = gen_mulu_i64_i32(tmp, tmp2);
       
  7641                 } else {
       
  7642                     if (op & 8) {
       
  7643                         /* smlalxy */
       
  7644                         gen_mulxy(tmp, tmp2, op & 2, op & 1);
       
  7645                         dead_tmp(tmp2);
       
  7646                         tmp64 = tcg_temp_new_i64();
       
  7647                         tcg_gen_ext_i32_i64(tmp64, tmp);
       
  7648                         dead_tmp(tmp);
       
  7649                     } else {
       
  7650                         /* Signed 64-bit multiply  */
       
  7651                         tmp64 = gen_muls_i64_i32(tmp, tmp2);
       
  7652                     }
       
  7653                 }
       
  7654                 if (op & 4) {
       
  7655                     /* umaal */
       
  7656                     gen_addq_lo(s, tmp64, rs);
       
  7657                     gen_addq_lo(s, tmp64, rd);
       
  7658                 } else if (op & 0x40) {
       
  7659                     /* 64-bit accumulate.  */
       
  7660                     gen_addq(s, tmp64, rs, rd);
       
  7661                 }
       
  7662                 gen_storeq_reg(s, rs, rd, tmp64);
       
  7663             }
       
  7664             break;
       
  7665         }
       
  7666         break;
       
  7667     case 6: case 7: case 14: case 15:
       
  7668         /* Coprocessor.  */
       
  7669         if (((insn >> 24) & 3) == 3) {
       
  7670             /* Translate into the equivalent ARM encoding.  */
       
  7671             insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
       
  7672             if (disas_neon_data_insn(env, s, insn))
       
  7673                 goto illegal_op;
       
  7674         } else {
       
  7675             if (insn & (1 << 28))
       
  7676                 goto illegal_op;
       
  7677             if (disas_coproc_insn (env, s, insn))
       
  7678                 goto illegal_op;
       
  7679         }
       
  7680         break;
       
  7681     case 8: case 9: case 10: case 11:
       
  7682         if (insn & (1 << 15)) {
       
  7683             /* Branches, misc control.  */
       
  7684             if (insn & 0x5000) {
       
  7685                 /* Unconditional branch.  */
       
   7686                 /* signextend(hw1[10:0]) -> offset[31:12].  */
       
  7687                 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
       
  7688                 /* hw1[10:0] -> offset[11:1].  */
       
  7689                 offset |= (insn & 0x7ff) << 1;
       
  7690                 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
       
  7691                    offset[24:22] already have the same value because of the
       
  7692                    sign extension above.  */
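                /* Illustrative note: with S = hw1[10], J1 = hw2[13] and
                   J2 = hw2[11], the architectural bits are I1 = NOT(J1 ^ S)
                   and I2 = NOT(J2 ^ S).  Offset bits 23 and 22 start out
                   equal to S, so XORing them with ~J1 and ~J2 below yields
                   I1 and I2 in place.  */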
       
  7693                 offset ^= ((~insn) & (1 << 13)) << 10;
       
  7694                 offset ^= ((~insn) & (1 << 11)) << 11;
       
  7695 
       
  7696                 if (insn & (1 << 14)) {
       
  7697                     /* Branch and link.  */
       
  7698                     gen_op_movl_T1_im(s->pc | 1);
       
  7699                     gen_movl_reg_T1(s, 14);
       
  7700                 }
       
  7701 
       
  7702                 offset += s->pc;
       
  7703                 if (insn & (1 << 12)) {
       
  7704                     /* b/bl */
       
  7705                     gen_jmp(s, offset);
       
  7706                 } else {
       
  7707                     /* blx */
       
  7708                     offset &= ~(uint32_t)2;
       
  7709                     gen_bx_im(s, offset);
       
  7710                 }
       
  7711             } else if (((insn >> 23) & 7) == 7) {
       
  7712                 /* Misc control */
       
  7713                 if (insn & (1 << 13))
       
  7714                     goto illegal_op;
       
  7715 
       
  7716                 if (insn & (1 << 26)) {
       
  7717                     /* Secure monitor call (v6Z) */
       
  7718                     goto illegal_op; /* not implemented.  */
       
  7719                 } else {
       
  7720                     op = (insn >> 20) & 7;
       
  7721                     switch (op) {
       
  7722                     case 0: /* msr cpsr.  */
       
  7723                         if (IS_M(env)) {
       
  7724                             tmp = load_reg(s, rn);
       
  7725                             addr = tcg_const_i32(insn & 0xff);
       
  7726                             gen_helper_v7m_msr(cpu_env, addr, tmp);
       
  7727                             gen_lookup_tb(s);
       
  7728                             break;
       
  7729                         }
       
  7730                         /* fall through */
       
  7731                     case 1: /* msr spsr.  */
       
  7732                         if (IS_M(env))
       
  7733                             goto illegal_op;
       
  7734                         gen_movl_T0_reg(s, rn);
       
  7735                         if (gen_set_psr_T0(s,
       
  7736                               msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
       
  7737                               op == 1))
       
  7738                             goto illegal_op;
       
  7739                         break;
       
  7740                     case 2: /* cps, nop-hint.  */
       
  7741                         if (((insn >> 8) & 7) == 0) {
       
  7742                             gen_nop_hint(s, insn & 0xff);
       
  7743                         }
       
  7744                         /* Implemented as NOP in user mode.  */
       
  7745                         if (IS_USER(s))
       
  7746                             break;
       
  7747                         offset = 0;
       
  7748                         imm = 0;
       
  7749                         if (insn & (1 << 10)) {
       
  7750                             if (insn & (1 << 7))
       
  7751                                 offset |= CPSR_A;
       
  7752                             if (insn & (1 << 6))
       
  7753                                 offset |= CPSR_I;
       
  7754                             if (insn & (1 << 5))
       
  7755                                 offset |= CPSR_F;
       
  7756                             if (insn & (1 << 9))
       
  7757                                 imm = CPSR_A | CPSR_I | CPSR_F;
       
  7758                         }
       
  7759                         if (insn & (1 << 8)) {
       
  7760                             offset |= 0x1f;
       
  7761                             imm |= (insn & 0x1f);
       
  7762                         }
       
  7763                         if (offset) {
       
  7764                             gen_op_movl_T0_im(imm);
       
  7765                             gen_set_psr_T0(s, offset, 0);
       
  7766                         }
       
  7767                         break;
       
  7768                     case 3: /* Special control operations.  */
       
  7769                         op = (insn >> 4) & 0xf;
       
  7770                         switch (op) {
       
  7771                         case 2: /* clrex */
       
  7772                             gen_helper_clrex(cpu_env);
       
  7773                             break;
       
  7774                         case 4: /* dsb */
       
  7775                         case 5: /* dmb */
       
  7776                         case 6: /* isb */
       
  7777                             /* These execute as NOPs.  */
       
  7778                             ARCH(7);
       
  7779                             break;
       
  7780                         default:
       
  7781                             goto illegal_op;
       
  7782                         }
       
  7783                         break;
       
  7784                     case 4: /* bxj */
       
  7785                         /* Trivial implementation equivalent to bx.  */
       
  7786                         tmp = load_reg(s, rn);
       
  7787                         gen_bx(s, tmp);
       
  7788                         break;
       
  7789                     case 5: /* Exception return.  */
       
  7790                         /* Unpredictable in user mode.  */
       
  7791                         goto illegal_op;
       
  7792                     case 6: /* mrs cpsr.  */
       
  7793                         tmp = new_tmp();
       
  7794                         if (IS_M(env)) {
       
  7795                             addr = tcg_const_i32(insn & 0xff);
       
  7796                             gen_helper_v7m_mrs(tmp, cpu_env, addr);
       
  7797                         } else {
       
  7798                             gen_helper_cpsr_read(tmp);
       
  7799                         }
       
  7800                         store_reg(s, rd, tmp);
       
  7801                         break;
       
  7802                     case 7: /* mrs spsr.  */
       
  7803                         /* Not accessible in user mode.  */
       
  7804                         if (IS_USER(s) || IS_M(env))
       
  7805                             goto illegal_op;
       
  7806                         tmp = load_cpu_field(spsr);
       
  7807                         store_reg(s, rd, tmp);
       
  7808                         break;
       
  7809                     }
       
  7810                 }
       
  7811             } else {
       
  7812                 /* Conditional branch.  */
       
  7813                 op = (insn >> 22) & 0xf;
       
  7814                 /* Generate a conditional jump to next instruction.  */
       
  7815                 s->condlabel = gen_new_label();
       
  7816                 gen_test_cc(op ^ 1, s->condlabel);
       
  7817                 s->condjmp = 1;
       
  7818 
       
  7819                 /* offset[11:1] = insn[10:0] */
       
  7820                 offset = (insn & 0x7ff) << 1;
       
  7821                 /* offset[17:12] = insn[21:16].  */
       
  7822                 offset |= (insn & 0x003f0000) >> 4;
       
  7823                 /* offset[31:20] = insn[26].  */
       
  7824                 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
       
  7825                 /* offset[18] = insn[13].  */
       
  7826                 offset |= (insn & (1 << 13)) << 5;
       
  7827                 /* offset[19] = insn[11].  */
       
  7828                 offset |= (insn & (1 << 11)) << 8;
       
  7829 
       
  7830                 /* jump to the offset */
       
  7831                 gen_jmp(s, s->pc + offset);
       
  7832             }
       
  7833         } else {
       
  7834             /* Data processing immediate.  */
       
  7835             if (insn & (1 << 25)) {
       
  7836                 if (insn & (1 << 24)) {
       
  7837                     if (insn & (1 << 20))
       
  7838                         goto illegal_op;
       
  7839                     /* Bitfield/Saturate.  */
       
  7840                     op = (insn >> 21) & 7;
       
  7841                     imm = insn & 0x1f;
       
  7842                     shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
       
  7843                     if (rn == 15) {
       
  7844                         tmp = new_tmp();
       
  7845                         tcg_gen_movi_i32(tmp, 0);
       
  7846                     } else {
       
  7847                         tmp = load_reg(s, rn);
       
  7848                     }
       
  7849                     switch (op) {
       
  7850                     case 2: /* Signed bitfield extract.  */
       
  7851                         imm++;
       
  7852                         if (shift + imm > 32)
       
  7853                             goto illegal_op;
       
  7854                         if (imm < 32)
       
  7855                             gen_sbfx(tmp, shift, imm);
       
  7856                         break;
       
  7857                     case 6: /* Unsigned bitfield extract.  */
       
  7858                         imm++;
       
  7859                         if (shift + imm > 32)
       
  7860                             goto illegal_op;
       
  7861                         if (imm < 32)
       
  7862                             gen_ubfx(tmp, shift, (1u << imm) - 1);
       
  7863                         break;
       
  7864                     case 3: /* Bitfield insert/clear.  */
       
  7865                         if (imm < shift)
       
  7866                             goto illegal_op;
       
  7867                         imm = imm + 1 - shift;
       
  7868                         if (imm != 32) {
       
  7869                             tmp2 = load_reg(s, rd);
       
  7870                             gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
       
  7871                             dead_tmp(tmp2);
       
  7872                         }
       
  7873                         break;
       
  7874                     case 7:
       
  7875                         goto illegal_op;
       
  7876                     default: /* Saturate.  */
       
  7877                         if (shift) {
       
  7878                             if (op & 1)
       
  7879                                 tcg_gen_sari_i32(tmp, tmp, shift);
       
  7880                             else
       
  7881                                 tcg_gen_shli_i32(tmp, tmp, shift);
       
  7882                         }
       
  7883                         tmp2 = tcg_const_i32(imm);
       
  7884                         if (op & 4) {
       
  7885                             /* Unsigned.  */
       
  7886                             if ((op & 1) && shift == 0)
       
  7887                                 gen_helper_usat16(tmp, tmp, tmp2);
       
  7888                             else
       
  7889                                 gen_helper_usat(tmp, tmp, tmp2);
       
  7890                         } else {
       
  7891                             /* Signed.  */
       
  7892                             if ((op & 1) && shift == 0)
       
  7893                                 gen_helper_ssat16(tmp, tmp, tmp2);
       
  7894                             else
       
  7895                                 gen_helper_ssat(tmp, tmp, tmp2);
       
  7896                         }
       
  7897                         break;
       
  7898                     }
       
  7899                     store_reg(s, rd, tmp);
       
  7900                 } else {
       
  7901                     imm = ((insn & 0x04000000) >> 15)
       
  7902                           | ((insn & 0x7000) >> 4) | (insn & 0xff);
       
  7903                     if (insn & (1 << 22)) {
       
  7904                         /* 16-bit immediate.  */
       
  7905                         imm |= (insn >> 4) & 0xf000;
       
  7906                         if (insn & (1 << 23)) {
       
  7907                             /* movt */
       
  7908                             tmp = load_reg(s, rd);
       
  7909                             tcg_gen_ext16u_i32(tmp, tmp);
       
  7910                             tcg_gen_ori_i32(tmp, tmp, imm << 16);
       
  7911                         } else {
       
  7912                             /* movw */
       
  7913                             tmp = new_tmp();
       
  7914                             tcg_gen_movi_i32(tmp, imm);
       
  7915                         }
       
  7916                     } else {
       
  7917                         /* Add/sub 12-bit immediate.  */
       
  7918                         if (rn == 15) {
       
  7919                             offset = s->pc & ~(uint32_t)3;
       
  7920                             if (insn & (1 << 23))
       
  7921                                 offset -= imm;
       
  7922                             else
       
  7923                                 offset += imm;
       
  7924                             tmp = new_tmp();
       
  7925                             tcg_gen_movi_i32(tmp, offset);
       
  7926                         } else {
       
  7927                             tmp = load_reg(s, rn);
       
  7928                             if (insn & (1 << 23))
       
  7929                                 tcg_gen_subi_i32(tmp, tmp, imm);
       
  7930                             else
       
  7931                                 tcg_gen_addi_i32(tmp, tmp, imm);
       
  7932                         }
       
  7933                     }
       
  7934                     store_reg(s, rd, tmp);
       
  7935                 }
       
  7936             } else {
       
  7937                 int shifter_out = 0;
       
  7938                 /* modified 12-bit immediate.  */
       
  7939                 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
       
  7940                 imm = (insn & 0xff);
       
  7941                 switch (shift) {
       
  7942                 case 0: /* XY */
       
  7943                     /* Nothing to do.  */
       
  7944                     break;
       
  7945                 case 1: /* 00XY00XY */
       
  7946                     imm |= imm << 16;
       
  7947                     break;
       
  7948                 case 2: /* XY00XY00 */
       
  7949                     imm |= imm << 16;
       
  7950                     imm <<= 8;
       
  7951                     break;
       
  7952                 case 3: /* XYXYXYXY */
       
  7953                     imm |= imm << 16;
       
  7954                     imm |= imm << 8;
       
  7955                     break;
       
  7956                 default: /* Rotated constant.  */
       
  7957                     shift = (shift << 1) | (imm >> 7);
       
  7958                     imm |= 0x80;
       
  7959                     imm = imm << (32 - shift);
       
  7960                     shifter_out = 1;
       
  7961                     break;
       
  7962                 }
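                /* Worked examples (illustrative note): with imm8 = 0xAB,
                   shifts 0..3 give 0x000000AB, 0x00AB00AB, 0xAB00AB00 and
                   0xABABABAB.  For a rotated constant, shift field 4 with
                   imm8 = 0xD5 gives shift = (4 << 1) | 1 = 9, and the value
                   is 0xD5 rotated right by 9, i.e. 0x6A800000.  */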
       
  7963                 gen_op_movl_T1_im(imm);
       
  7964                 rn = (insn >> 16) & 0xf;
       
  7965                 if (rn == 15)
       
  7966                     gen_op_movl_T0_im(0);
       
  7967                 else
       
  7968                     gen_movl_T0_reg(s, rn);
       
  7969                 op = (insn >> 21) & 0xf;
       
  7970                 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
       
  7971                                        shifter_out))
       
  7972                     goto illegal_op;
       
  7973                 rd = (insn >> 8) & 0xf;
       
  7974                 if (rd != 15) {
       
  7975                     gen_movl_reg_T0(s, rd);
       
  7976                 }
       
  7977             }
       
  7978         }
       
  7979         break;
       
  7980     case 12: /* Load/store single data item.  */
       
  7981         {
       
  7982         int postinc = 0;
       
  7983         int writeback = 0;
       
  7984         int user;
       
  7985         if ((insn & 0x01100000) == 0x01000000) {
       
  7986             if (disas_neon_ls_insn(env, s, insn))
       
  7987                 goto illegal_op;
       
  7988             break;
       
  7989         }
       
  7990         user = IS_USER(s);
       
  7991         if (rn == 15) {
       
  7992             addr = new_tmp();
       
  7993             /* PC relative.  */
       
  7994             /* s->pc has already been incremented by 4.  */
       
  7995             imm = s->pc & 0xfffffffc;
       
  7996             if (insn & (1 << 23))
       
  7997                 imm += insn & 0xfff;
       
  7998             else
       
  7999                 imm -= insn & 0xfff;
       
  8000             tcg_gen_movi_i32(addr, imm);
       
  8001         } else {
       
  8002             addr = load_reg(s, rn);
       
  8003             if (insn & (1 << 23)) {
       
  8004                 /* Positive offset.  */
       
  8005                 imm = insn & 0xfff;
       
  8006                 tcg_gen_addi_i32(addr, addr, imm);
       
  8007             } else {
       
  8008                 op = (insn >> 8) & 7;
       
  8009                 imm = insn & 0xff;
       
  8010                 switch (op) {
       
  8011                 case 0: case 8: /* Shifted Register.  */
       
  8012                     shift = (insn >> 4) & 0xf;
       
  8013                     if (shift > 3)
       
  8014                         goto illegal_op;
       
  8015                     tmp = load_reg(s, rm);
       
  8016                     if (shift)
       
  8017                         tcg_gen_shli_i32(tmp, tmp, shift);
       
  8018                     tcg_gen_add_i32(addr, addr, tmp);
       
  8019                     dead_tmp(tmp);
       
  8020                     break;
       
  8021                 case 4: /* Negative offset.  */
       
  8022                     tcg_gen_addi_i32(addr, addr, -imm);
       
  8023                     break;
       
  8024                 case 6: /* User privilege.  */
       
  8025                     tcg_gen_addi_i32(addr, addr, imm);
       
  8026                     user = 1;
       
  8027                     break;
       
  8028                 case 1: /* Post-decrement.  */
       
  8029                     imm = -imm;
       
  8030                     /* Fall through.  */
       
  8031                 case 3: /* Post-increment.  */
       
  8032                     postinc = 1;
       
  8033                     writeback = 1;
       
  8034                     break;
       
  8035                 case 5: /* Pre-decrement.  */
       
  8036                     imm = -imm;
       
  8037                     /* Fall through.  */
       
  8038                 case 7: /* Pre-increment.  */
       
  8039                     tcg_gen_addi_i32(addr, addr, imm);
       
  8040                     writeback = 1;
       
  8041                     break;
       
  8042                 default:
       
  8043                     goto illegal_op;
       
  8044                 }
       
  8045             }
       
  8046         }
       
  8047         op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
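               /* op now holds the access size in bits 1:0 (0 = byte,
                  1 = halfword, 2 = word) and sign-extension in bit 2,
                  matching the load cases below.  */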
       
  8048         if (insn & (1 << 20)) {
       
  8049             /* Load.  */
       
  8050             if (rs == 15 && op != 2) {
       
  8051                 if (op & 2)
       
  8052                     goto illegal_op;
       
  8053                 /* Memory hint.  Implemented as NOP.  */
       
  8054             } else {
       
  8055                 switch (op) {
       
  8056                 case 0: tmp = gen_ld8u(addr, user); break;
       
  8057                 case 4: tmp = gen_ld8s(addr, user); break;
       
  8058                 case 1: tmp = gen_ld16u(addr, user); break;
       
  8059                 case 5: tmp = gen_ld16s(addr, user); break;
       
  8060                 case 2: tmp = gen_ld32(addr, user); break;
       
  8061                 default: goto illegal_op;
       
  8062                 }
       
  8063                 if (rs == 15) {
       
  8064                     gen_bx(s, tmp);
       
  8065                 } else {
       
  8066                     store_reg(s, rs, tmp);
       
  8067                 }
       
  8068             }
       
  8069         } else {
       
  8070             /* Store.  */
       
  8071             if (rs == 15)
       
  8072                 goto illegal_op;
       
  8073             tmp = load_reg(s, rs);
       
  8074             switch (op) {
       
  8075             case 0: gen_st8(tmp, addr, user); break;
       
  8076             case 1: gen_st16(tmp, addr, user); break;
       
  8077             case 2: gen_st32(tmp, addr, user); break;
       
  8078             default: goto illegal_op;
       
  8079             }
       
  8080         }
       
  8081         if (postinc)
       
  8082             tcg_gen_addi_i32(addr, addr, imm);
       
  8083         if (writeback) {
       
  8084             store_reg(s, rn, addr);
       
  8085         } else {
       
  8086             dead_tmp(addr);
       
  8087         }
       
  8088         }
       
  8089         break;
       
  8090     default:
       
  8091         goto illegal_op;
       
  8092     }
       
  8093     return 0;
       
  8094 illegal_op:
       
  8095     return 1;
       
  8096 }
       
  8097 
       
  8098 static void disas_thumb_insn(CPUState *env, DisasContext *s)
       
  8099 {
       
  8100     uint32_t val, insn, op, rm, rn, rd, shift, cond;
       
  8101     int32_t offset;
       
  8102     int i;
       
  8103     TCGv tmp;
       
  8104     TCGv tmp2;
       
  8105     TCGv addr;
       
  8106 
       
  8107     if (s->condexec_mask) {
       
  8108         cond = s->condexec_cond;
       
  8109         s->condlabel = gen_new_label();
       
  8110         gen_test_cc(cond ^ 1, s->condlabel);
       
  8111         s->condjmp = 1;
       
  8112     }
       
  8113 
       
  8114     insn = lduw_code(s->pc);
       
  8115     if (env->bswap_code)
       
  8116         insn = bswap16(insn);
       
  8117     s->pc += 2;
       
  8118 
       
  8119     switch (insn >> 12) {
       
  8120     case 0: case 1:
       
  8121         rd = insn & 7;
       
  8122         op = (insn >> 11) & 3;
       
  8123         if (op == 3) {
       
  8124             /* add/subtract */
       
  8125             rn = (insn >> 3) & 7;
       
  8126             gen_movl_T0_reg(s, rn);
       
  8127             if (insn & (1 << 10)) {
       
  8128                 /* immediate */
       
  8129                 gen_op_movl_T1_im((insn >> 6) & 7);
       
  8130             } else {
       
  8131                 /* reg */
       
  8132                 rm = (insn >> 6) & 7;
       
  8133                 gen_movl_T1_reg(s, rm);
       
  8134             }
       
  8135             if (insn & (1 << 9)) {
       
  8136                 if (s->condexec_mask)
       
  8137                     gen_op_subl_T0_T1();
       
  8138                 else
       
  8139                     gen_op_subl_T0_T1_cc();
       
  8140             } else {
       
  8141                 if (s->condexec_mask)
       
  8142                     gen_op_addl_T0_T1();
       
  8143                 else
       
  8144                     gen_op_addl_T0_T1_cc();
       
  8145             }
       
  8146             gen_movl_reg_T0(s, rd);
       
  8147         } else {
       
  8148             /* shift immediate */
       
  8149             rm = (insn >> 3) & 7;
       
  8150             shift = (insn >> 6) & 0x1f;
       
  8151             tmp = load_reg(s, rm);
       
  8152             gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
       
  8153             if (!s->condexec_mask)
       
  8154                 gen_logic_CC(tmp);
       
  8155             store_reg(s, rd, tmp);
       
  8156         }
       
  8157         break;
       
  8158     case 2: case 3:
       
  8159         /* arithmetic large immediate */
       
  8160         op = (insn >> 11) & 3;
       
  8161         rd = (insn >> 8) & 0x7;
       
  8162         if (op == 0) {
       
  8163             gen_op_movl_T0_im(insn & 0xff);
       
  8164         } else {
       
  8165             gen_movl_T0_reg(s, rd);
       
  8166             gen_op_movl_T1_im(insn & 0xff);
       
  8167         }
       
  8168         switch (op) {
       
  8169         case 0: /* mov */
       
  8170             if (!s->condexec_mask)
       
  8171                 gen_op_logic_T0_cc();
       
  8172             break;
       
  8173         case 1: /* cmp */
       
  8174             gen_op_subl_T0_T1_cc();
       
  8175             break;
       
  8176         case 2: /* add */
       
  8177             if (s->condexec_mask)
       
  8178                 gen_op_addl_T0_T1();
       
  8179             else
       
  8180                 gen_op_addl_T0_T1_cc();
       
  8181             break;
       
  8182         case 3: /* sub */
       
  8183             if (s->condexec_mask)
       
  8184                 gen_op_subl_T0_T1();
       
  8185             else
       
  8186                 gen_op_subl_T0_T1_cc();
       
  8187             break;
       
  8188         }
       
  8189         if (op != 1)
       
  8190             gen_movl_reg_T0(s, rd);
       
  8191         break;
       
  8192     case 4:
       
  8193         if (insn & (1 << 11)) {
       
  8194             rd = (insn >> 8) & 7;
       
  8195             /* load pc-relative.  Bit 1 of PC is ignored.  */
       
  8196             val = s->pc + 2 + ((insn & 0xff) * 4);
       
  8197             val &= ~(uint32_t)2;
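                   /* s->pc already points past this insn, so s->pc + 2 is
                      the architected Thumb PC (this insn + 4).  The offset
                      is a multiple of 4, so clearing bit 1 of the sum is
                      the same as word-aligning the PC before the add.  */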
       
  8198             addr = new_tmp();
       
  8199             tcg_gen_movi_i32(addr, val);
       
  8200             tmp = gen_ld32(addr, IS_USER(s));
       
  8201             dead_tmp(addr);
       
  8202             store_reg(s, rd, tmp);
       
  8203             break;
       
  8204         }
       
  8205         if (insn & (1 << 10)) {
       
  8206             /* data processing extended or blx */
       
  8207             rd = (insn & 7) | ((insn >> 4) & 8);
       
  8208             rm = (insn >> 3) & 0xf;
       
  8209             op = (insn >> 8) & 3;
       
  8210             switch (op) {
       
  8211             case 0: /* add */
       
  8212                 gen_movl_T0_reg(s, rd);
       
  8213                 gen_movl_T1_reg(s, rm);
       
  8214                 gen_op_addl_T0_T1();
       
  8215                 gen_movl_reg_T0(s, rd);
       
  8216                 break;
       
  8217             case 1: /* cmp */
       
  8218                 gen_movl_T0_reg(s, rd);
       
  8219                 gen_movl_T1_reg(s, rm);
       
  8220                 gen_op_subl_T0_T1_cc();
       
  8221                 break;
       
  8222             case 2: /* mov/cpy */
       
  8223                 gen_movl_T0_reg(s, rm);
       
  8224                 gen_movl_reg_T0(s, rd);
       
  8225                 break;
       
  8226             case 3: /* branch [and link] exchange thumb register */
       
  8227                 tmp = load_reg(s, rm);
       
  8228                 if (insn & (1 << 7)) {
       
  8229                     val = (uint32_t)s->pc | 1;
       
  8230                     tmp2 = new_tmp();
       
  8231                     tcg_gen_movi_i32(tmp2, val);
       
  8232                     store_reg(s, 14, tmp2);
       
  8233                 }
       
  8234                 gen_bx(s, tmp);
       
  8235                 break;
       
  8236             }
       
  8237             break;
       
  8238         }
       
  8239 
       
  8240         /* data processing register */
       
  8241         rd = insn & 7;
       
  8242         rm = (insn >> 3) & 7;
       
  8243         op = (insn >> 6) & 0xf;
       
  8244         if (op == 2 || op == 3 || op == 4 || op == 7) {
       
  8245             /* the shift/rotate ops want the operands backwards */
       
  8246             val = rm;
       
  8247             rm = rd;
       
  8248             rd = val;
       
  8249             val = 1;
       
  8250         } else {
       
  8251             val = 0;
       
  8252         }
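               /* val == 1 means the result ends up in T1 and is written
                  back to rm (the shift/rotate ops and mvn); otherwise the
                  result is in T0 and written to rd.  rd == 16 marks the
                  compare/test ops, which write no register at all.  */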
       
  8253 
       
  8254         if (op == 9) /* neg */
       
  8255             gen_op_movl_T0_im(0);
       
  8256         else if (op != 0xf) /* mvn doesn't read its first operand */
       
  8257             gen_movl_T0_reg(s, rd);
       
  8258 
       
  8259         gen_movl_T1_reg(s, rm);
       
  8260         switch (op) {
       
  8261         case 0x0: /* and */
       
  8262             gen_op_andl_T0_T1();
       
  8263             if (!s->condexec_mask)
       
  8264                 gen_op_logic_T0_cc();
       
  8265             break;
       
  8266         case 0x1: /* eor */
       
  8267             gen_op_xorl_T0_T1();
       
  8268             if (!s->condexec_mask)
       
  8269                 gen_op_logic_T0_cc();
       
  8270             break;
       
  8271         case 0x2: /* lsl */
       
  8272             if (s->condexec_mask) {
       
  8273                 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
       
  8274             } else {
       
  8275                 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
       
  8276                 gen_op_logic_T1_cc();
       
  8277             }
       
  8278             break;
       
  8279         case 0x3: /* lsr */
       
  8280             if (s->condexec_mask) {
       
  8281                 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
       
  8282             } else {
       
  8283                 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
       
  8284                 gen_op_logic_T1_cc();
       
  8285             }
       
  8286             break;
       
  8287         case 0x4: /* asr */
       
  8288             if (s->condexec_mask) {
       
  8289                 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
       
  8290             } else {
       
  8291                 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
       
  8292                 gen_op_logic_T1_cc();
       
  8293             }
       
  8294             break;
       
  8295         case 0x5: /* adc */
       
  8296             if (s->condexec_mask)
       
  8297                 gen_adc_T0_T1();
       
  8298             else
       
  8299                 gen_op_adcl_T0_T1_cc();
       
  8300             break;
       
  8301         case 0x6: /* sbc */
       
  8302             if (s->condexec_mask)
       
  8303                 gen_sbc_T0_T1();
       
  8304             else
       
  8305                 gen_op_sbcl_T0_T1_cc();
       
  8306             break;
       
  8307         case 0x7: /* ror */
       
  8308             if (s->condexec_mask) {
       
  8309                 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
       
  8310             } else {
       
  8311                 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
       
  8312                 gen_op_logic_T1_cc();
       
  8313             }
       
  8314             break;
       
  8315         case 0x8: /* tst */
       
  8316             gen_op_andl_T0_T1();
       
  8317             gen_op_logic_T0_cc();
       
  8318             rd = 16;
       
  8319             break;
       
  8320         case 0x9: /* neg */
       
  8321             if (s->condexec_mask)
       
  8322                 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
       
  8323             else
       
  8324                 gen_op_subl_T0_T1_cc();
       
  8325             break;
       
  8326         case 0xa: /* cmp */
       
  8327             gen_op_subl_T0_T1_cc();
       
  8328             rd = 16;
       
  8329             break;
       
  8330         case 0xb: /* cmn */
       
  8331             gen_op_addl_T0_T1_cc();
       
  8332             rd = 16;
       
  8333             break;
       
  8334         case 0xc: /* orr */
       
  8335             gen_op_orl_T0_T1();
       
  8336             if (!s->condexec_mask)
       
  8337                 gen_op_logic_T0_cc();
       
  8338             break;
       
  8339         case 0xd: /* mul */
       
  8340             gen_op_mull_T0_T1();
       
  8341             if (!s->condexec_mask)
       
  8342                 gen_op_logic_T0_cc();
       
  8343             break;
       
  8344         case 0xe: /* bic */
       
  8345             gen_op_bicl_T0_T1();
       
  8346             if (!s->condexec_mask)
       
  8347                 gen_op_logic_T0_cc();
       
  8348             break;
       
  8349         case 0xf: /* mvn */
       
  8350             gen_op_notl_T1();
       
  8351             if (!s->condexec_mask)
       
  8352                 gen_op_logic_T1_cc();
       
  8353             val = 1;
       
  8354             rm = rd;
       
  8355             break;
       
  8356         }
       
  8357         if (rd != 16) {
       
  8358             if (val)
       
  8359                 gen_movl_reg_T1(s, rm);
       
  8360             else
       
  8361                 gen_movl_reg_T0(s, rd);
       
  8362         }
       
  8363         break;
       
  8364 
       
  8365     case 5:
       
  8366         /* load/store register offset.  */
       
  8367         rd = insn & 7;
       
  8368         rn = (insn >> 3) & 7;
       
  8369         rm = (insn >> 6) & 7;
       
  8370         op = (insn >> 9) & 7;
       
  8371         addr = load_reg(s, rn);
       
  8372         tmp = load_reg(s, rm);
       
  8373         tcg_gen_add_i32(addr, addr, tmp);
       
  8374         dead_tmp(tmp);
       
  8375 
       
  8376         if (op < 3) /* store */
       
  8377             tmp = load_reg(s, rd);
       
  8378 
       
  8379         switch (op) {
       
  8380         case 0: /* str */
       
  8381             gen_st32(tmp, addr, IS_USER(s));
       
  8382             break;
       
  8383         case 1: /* strh */
       
  8384             gen_st16(tmp, addr, IS_USER(s));
       
  8385             break;
       
  8386         case 2: /* strb */
       
  8387             gen_st8(tmp, addr, IS_USER(s));
       
  8388             break;
       
  8389         case 3: /* ldrsb */
       
  8390             tmp = gen_ld8s(addr, IS_USER(s));
       
  8391             break;
       
  8392         case 4: /* ldr */
       
  8393             tmp = gen_ld32(addr, IS_USER(s));
       
  8394             break;
       
  8395         case 5: /* ldrh */
       
  8396             tmp = gen_ld16u(addr, IS_USER(s));
       
  8397             break;
       
  8398         case 6: /* ldrb */
       
  8399             tmp = gen_ld8u(addr, IS_USER(s));
       
  8400             break;
       
  8401         case 7: /* ldrsh */
       
  8402             tmp = gen_ld16s(addr, IS_USER(s));
       
  8403             break;
       
  8404         }
       
  8405         if (op >= 3) /* load */
       
  8406             store_reg(s, rd, tmp);
       
  8407         dead_tmp(addr);
       
  8408         break;
       
  8409 
       
  8410     case 6:
       
  8411         /* load/store word immediate offset */
       
  8412         rd = insn & 7;
       
  8413         rn = (insn >> 3) & 7;
       
  8414         addr = load_reg(s, rn);
       
  8415         val = (insn >> 4) & 0x7c;
       
  8416         tcg_gen_addi_i32(addr, addr, val);
       
  8417 
       
  8418         if (insn & (1 << 11)) {
       
  8419             /* load */
       
  8420             tmp = gen_ld32(addr, IS_USER(s));
       
  8421             store_reg(s, rd, tmp);
       
  8422         } else {
       
  8423             /* store */
       
  8424             tmp = load_reg(s, rd);
       
  8425             gen_st32(tmp, addr, IS_USER(s));
       
  8426         }
       
  8427         dead_tmp(addr);
       
  8428         break;
       
  8429 
       
  8430     case 7:
       
  8431         /* load/store byte immediate offset */
       
  8432         rd = insn & 7;
       
  8433         rn = (insn >> 3) & 7;
       
  8434         addr = load_reg(s, rn);
       
  8435         val = (insn >> 6) & 0x1f;
       
  8436         tcg_gen_addi_i32(addr, addr, val);
       
  8437 
       
  8438         if (insn & (1 << 11)) {
       
  8439             /* load */
       
  8440             tmp = gen_ld8u(addr, IS_USER(s));
       
  8441             store_reg(s, rd, tmp);
       
  8442         } else {
       
  8443             /* store */
       
  8444             tmp = load_reg(s, rd);
       
  8445             gen_st8(tmp, addr, IS_USER(s));
       
  8446         }
       
  8447         dead_tmp(addr);
       
  8448         break;
       
  8449 
       
  8450     case 8:
       
  8451         /* load/store halfword immediate offset */
       
  8452         rd = insn & 7;
       
  8453         rn = (insn >> 3) & 7;
       
  8454         addr = load_reg(s, rn);
       
  8455         val = (insn >> 5) & 0x3e;
       
  8456         tcg_gen_addi_i32(addr, addr, val);
       
  8457 
       
  8458         if (insn & (1 << 11)) {
       
  8459             /* load */
       
  8460             tmp = gen_ld16u(addr, IS_USER(s));
       
  8461             store_reg(s, rd, tmp);
       
  8462         } else {
       
  8463             /* store */
       
  8464             tmp = load_reg(s, rd);
       
  8465             gen_st16(tmp, addr, IS_USER(s));
       
  8466         }
       
  8467         dead_tmp(addr);
       
  8468         break;
       
  8469 
       
  8470     case 9:
       
  8471         /* load/store from stack */
       
  8472         rd = (insn >> 8) & 7;
       
  8473         addr = load_reg(s, 13);
       
  8474         val = (insn & 0xff) * 4;
       
  8475         tcg_gen_addi_i32(addr, addr, val);
       
  8476 
       
  8477         if (insn & (1 << 11)) {
       
  8478             /* load */
       
  8479             tmp = gen_ld32(addr, IS_USER(s));
       
  8480             store_reg(s, rd, tmp);
       
  8481         } else {
       
  8482             /* store */
       
  8483             tmp = load_reg(s, rd);
       
  8484             gen_st32(tmp, addr, IS_USER(s));
       
  8485         }
       
  8486         dead_tmp(addr);
       
  8487         break;
       
  8488 
       
  8489     case 10:
       
  8490         /* add to high reg */
       
  8491         rd = (insn >> 8) & 7;
       
  8492         if (insn & (1 << 11)) {
       
  8493             /* SP */
       
  8494             tmp = load_reg(s, 13);
       
  8495         } else {
       
  8496             /* PC. bit 1 is ignored.  */
       
  8497             tmp = new_tmp();
       
  8498             tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
       
  8499         }
       
  8500         val = (insn & 0xff) * 4;
       
  8501         tcg_gen_addi_i32(tmp, tmp, val);
       
  8502         store_reg(s, rd, tmp);
       
  8503         break;
       
  8504 
       
  8505     case 11:
       
  8506         /* misc */
       
  8507         op = (insn >> 8) & 0xf;
       
  8508         switch (op) {
       
  8509         case 0:
       
  8510             /* adjust stack pointer */
       
  8511             tmp = load_reg(s, 13);
       
  8512             val = (insn & 0x7f) * 4;
       
  8513             if (insn & (1 << 7))
       
  8514                 val = -(int32_t)val;
       
  8515             tcg_gen_addi_i32(tmp, tmp, val);
       
  8516             store_reg(s, 13, tmp);
       
  8517             break;
       
  8518 
       
  8519         case 2: /* sign/zero extend.  */
       
  8520             ARCH(6);
       
  8521             rd = insn & 7;
       
  8522             rm = (insn >> 3) & 7;
       
  8523             tmp = load_reg(s, rm);
       
  8524             switch ((insn >> 6) & 3) {
       
  8525             case 0: gen_sxth(tmp); break;
       
  8526             case 1: gen_sxtb(tmp); break;
       
  8527             case 2: gen_uxth(tmp); break;
       
  8528             case 3: gen_uxtb(tmp); break;
       
  8529             }
       
  8530             store_reg(s, rd, tmp);
       
  8531             break;
       
  8532         case 4: case 5: case 0xc: case 0xd:
       
  8533             /* push/pop */
       
  8534             addr = load_reg(s, 13);
       
  8535             if (insn & (1 << 8))
       
  8536                 offset = 4;
       
  8537             else
       
  8538                 offset = 0;
       
  8539             for (i = 0; i < 8; i++) {
       
  8540                 if (insn & (1 << i))
       
  8541                     offset += 4;
       
  8542             }
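                   /* e.g. "push {r0, r2, lr}" gives offset == 12: SP is
                      lowered by 12, the registers are stored at ascending
                      addresses, and the final subtraction below leaves the
                      lowered SP to be written back.  */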
       
  8543             if ((insn & (1 << 11)) == 0) {
       
  8544                 tcg_gen_addi_i32(addr, addr, -offset);
       
  8545             }
       
  8546             for (i = 0; i < 8; i++) {
       
  8547                 if (insn & (1 << i)) {
       
  8548                     if (insn & (1 << 11)) {
       
  8549                         /* pop */
       
  8550                         tmp = gen_ld32(addr, IS_USER(s));
       
  8551                         store_reg(s, i, tmp);
       
  8552                     } else {
       
  8553                         /* push */
       
  8554                         tmp = load_reg(s, i);
       
  8555                         gen_st32(tmp, addr, IS_USER(s));
       
  8556                     }
       
  8557                     /* advance to the next address.  */
       
  8558                     tcg_gen_addi_i32(addr, addr, 4);
       
  8559                 }
       
  8560             }
       
  8561             TCGV_UNUSED(tmp);
       
  8562             if (insn & (1 << 8)) {
       
  8563                 if (insn & (1 << 11)) {
       
  8564                     /* pop pc */
       
  8565                     tmp = gen_ld32(addr, IS_USER(s));
       
  8566                     /* don't set the pc until the rest of the instruction
       
  8567                        has completed */
       
  8568                 } else {
       
  8569                     /* push lr */
       
  8570                     tmp = load_reg(s, 14);
       
  8571                     gen_st32(tmp, addr, IS_USER(s));
       
  8572                 }
       
  8573                 tcg_gen_addi_i32(addr, addr, 4);
       
  8574             }
       
  8575             if ((insn & (1 << 11)) == 0) {
       
  8576                 tcg_gen_addi_i32(addr, addr, -offset);
       
  8577             }
       
  8578             /* write back the new stack pointer */
       
  8579             store_reg(s, 13, addr);
       
  8580             /* set the new PC value */
       
  8581             if ((insn & 0x0900) == 0x0900)
       
  8582                 gen_bx(s, tmp);
       
  8583             break;
       
  8584 
       
  8585         case 1: case 3: case 9: case 11: /* cbz/cbnz */
       
  8586             rm = insn & 7;
       
  8587             tmp = load_reg(s, rm);
       
  8588             s->condlabel = gen_new_label();
       
  8589             s->condjmp = 1;
       
  8590             if (insn & (1 << 11))
       
  8591                 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
       
  8592             else
       
  8593                 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
       
  8594             dead_tmp(tmp);
       
  8595             offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
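                   /* offset is the branch immediate i:imm5:'0' (insn bit 9
                      and bits 7:3); the target computed below is the
                      architected PC (this insn + 4) plus that offset.  */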
       
  8596             val = (uint32_t)s->pc + 2;
       
  8597             val += offset;
       
  8598             gen_jmp(s, val);
       
  8599             break;
       
  8600 
       
  8601         case 15: /* IT, nop-hint.  */
       
  8602             if ((insn & 0xf) == 0) {
       
  8603                 gen_nop_hint(s, (insn >> 4) & 0xf);
       
  8604                 break;
       
  8605             }
       
  8606             /* If Then.  */
       
  8607             s->condexec_cond = (insn >> 4) & 0xe;
       
  8608             s->condexec_mask = insn & 0x1f;
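                   /* condexec_cond keeps firstcond with bit 0 cleared; the
                      5-bit mask (firstcond<0>:mask) supplies, one bit per
                      following insn, the low condition bit for the rest of
                      the IT block (see the shift in the translation loop).  */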
       
  8609             /* No actual code generated for this insn, just set up state.  */
       
  8610             break;
       
  8611 
       
  8612         case 0xe: /* bkpt */
       
  8613             gen_set_condexec(s);
       
  8614             gen_set_pc_im(s->pc - 2);
       
  8615             gen_exception(EXCP_BKPT);
       
  8616             s->is_jmp = DISAS_JUMP;
       
  8617             break;
       
  8618 
       
  8619         case 0xa: /* rev */
       
  8620             ARCH(6);
       
  8621             rn = (insn >> 3) & 0x7;
       
  8622             rd = insn & 0x7;
       
  8623             tmp = load_reg(s, rn);
       
  8624             switch ((insn >> 6) & 3) {
       
  8625             case 0: tcg_gen_bswap_i32(tmp, tmp); break;
       
  8626             case 1: gen_rev16(tmp); break;
       
  8627             case 3: gen_revsh(tmp); break;
       
  8628             default: goto illegal_op;
       
  8629             }
       
  8630             store_reg(s, rd, tmp);
       
  8631             break;
       
  8632 
       
  8633         case 6: /* cps */
       
  8634             ARCH(6);
       
  8635             if (IS_USER(s))
       
  8636                 break;
       
  8637             if (IS_M(env)) {
       
  8638                 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
       
  8639                 /* PRIMASK */
       
  8640                 if (insn & 1) {
       
  8641                     addr = tcg_const_i32(16);
       
  8642                     gen_helper_v7m_msr(cpu_env, addr, tmp);
       
  8643                 }
       
  8644                 /* FAULTMASK */
       
  8645                 if (insn & 2) {
       
  8646                     addr = tcg_const_i32(17);
       
  8647                     gen_helper_v7m_msr(cpu_env, addr, tmp);
       
  8648                 }
       
  8649                 gen_lookup_tb(s);
       
  8650             } else {
       
  8651                 if (insn & (1 << 4))
       
  8652                     shift = CPSR_A | CPSR_I | CPSR_F;
       
  8653                 else
       
  8654                     shift = 0;
       
  8655 
       
  8656                 val = ((insn & 7) << 6) & shift;
       
  8657                 gen_op_movl_T0_im(val);
       
  8658                 gen_set_psr_T0(s, shift, 0);
       
  8659             }
       
  8660             break;
       
  8661 
       
  8662         default:
       
  8663             goto undef;
       
  8664         }
       
  8665         break;
       
  8666 
       
  8667     case 12:
       
  8668         /* load/store multiple */
       
  8669         rn = (insn >> 8) & 0x7;
       
  8670         addr = load_reg(s, rn);
       
  8671         for (i = 0; i < 8; i++) {
       
  8672             if (insn & (1 << i)) {
       
  8673                 if (insn & (1 << 11)) {
       
  8674                     /* load */
       
  8675                     tmp = gen_ld32(addr, IS_USER(s));
       
  8676                     store_reg(s, i, tmp);
       
  8677                 } else {
       
  8678                     /* store */
       
  8679                     tmp = load_reg(s, i);
       
  8680                     gen_st32(tmp, addr, IS_USER(s));
       
  8681                 }
       
  8682                 /* advance to the next address */
       
  8683                 tcg_gen_addi_i32(addr, addr, 4);
       
  8684             }
       
  8685         }
       
  8686         /* Base register writeback.  */
       
  8687         if ((insn & (1 << rn)) == 0) {
       
  8688             store_reg(s, rn, addr);
       
  8689         } else {
       
  8690             dead_tmp(addr);
       
  8691         }
       
  8692         break;
       
  8693 
       
  8694     case 13:
       
  8695         /* conditional branch or swi */
       
  8696         cond = (insn >> 8) & 0xf;
       
  8697         if (cond == 0xe)
       
  8698             goto undef;
       
  8699 
       
  8700         if (cond == 0xf) {
       
  8701             /* swi */
       
  8702             gen_set_condexec(s);
       
  8703             gen_set_pc_im(s->pc);
       
  8704             s->is_jmp = DISAS_SWI;
       
  8705             break;
       
  8706         }
       
  8707         /* generate a conditional jump to next instruction */
       
  8708         s->condlabel = gen_new_label();
       
  8709         gen_test_cc(cond ^ 1, s->condlabel);
       
  8710         s->condjmp = 1;
       
  8711         gen_movl_T1_reg(s, 15);
       
  8712 
       
  8713         /* jump to the offset */
       
  8714         val = (uint32_t)s->pc + 2;
       
  8715         offset = ((int32_t)insn << 24) >> 24;
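               /* Sign-extend the 8-bit immediate: the target is this insn
                  + 4 + (imm8 * 2), i.e. roughly -256..+254 bytes away.  */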
       
  8716         val += offset << 1;
       
  8717         gen_jmp(s, val);
       
  8718         break;
       
  8719 
       
  8720     case 14:
       
  8721         if (insn & (1 << 11)) {
       
  8722             if (disas_thumb2_insn(env, s, insn))
       
  8723               goto undef32;
       
  8724             break;
       
  8725         }
       
  8726         /* unconditional branch */
       
  8727         val = (uint32_t)s->pc;
       
  8728         offset = ((int32_t)insn << 21) >> 21;
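               /* Sign-extend the 11-bit immediate: the target is this insn
                  + 4 + (imm11 * 2), i.e. roughly a +/-2KB range.  */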
       
  8729         val += (offset << 1) + 2;
       
  8730         gen_jmp(s, val);
       
  8731         break;
       
  8732 
       
  8733     case 15:
       
  8734         if (disas_thumb2_insn(env, s, insn))
       
  8735             goto undef32;
       
  8736         break;
       
  8737     }
       
  8738     return;
       
  8739 undef32:
       
  8740     gen_set_condexec(s);
       
  8741     gen_set_pc_im(s->pc - 4);
       
  8742     gen_exception(EXCP_UDEF);
       
  8743     s->is_jmp = DISAS_JUMP;
       
  8744     return;
       
  8745 illegal_op:
       
  8746 undef:
       
  8747     gen_set_condexec(s);
       
  8748     gen_set_pc_im(s->pc - 2);
       
  8749     gen_exception(EXCP_UDEF);
       
  8750     s->is_jmp = DISAS_JUMP;
       
  8751 }
       
  8752 
       
  8753 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
       
  8754    basic block 'tb'. If search_pc is TRUE, also generate PC
       
  8755    information for each intermediate instruction. */
       
  8756 static inline void gen_intermediate_code_internal(CPUState *env,
       
  8757                                                   TranslationBlock *tb,
       
  8758                                                   int search_pc)
       
  8759 {
       
  8760     DisasContext dc1, *dc = &dc1;
       
  8761     CPUBreakpoint *bp;
       
  8762     uint16_t *gen_opc_end;
       
  8763     int j, lj;
       
  8764     target_ulong pc_start;
       
  8765     uint32_t next_page_start;
       
  8766     int num_insns;
       
  8767     int max_insns;
       
  8768 
       
  8769     /* generate intermediate code */
       
  8770     num_temps = 0;
       
  8771     memset(temps, 0, sizeof(temps));
       
  8772 
       
  8773     pc_start = tb->pc;
       
  8774 
       
  8775     dc->tb = tb;
       
  8776 
       
  8777     gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
       
  8778 
       
  8779     dc->is_jmp = DISAS_NEXT;
       
  8780     dc->pc = pc_start;
       
  8781     dc->singlestep_enabled = env->singlestep_enabled;
       
  8782     dc->condjmp = 0;
       
  8783     dc->thumb = env->thumb;
       
  8784     dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
       
  8785     dc->condexec_cond = env->condexec_bits >> 4;
       
  8786 #if !defined(CONFIG_USER_ONLY)
       
  8787     if (IS_M(env)) {
       
  8788         dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
       
  8789     } else {
       
  8790         dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
       
  8791     }
       
  8792 #endif
       
  8793     cpu_F0s = tcg_temp_new_i32();
       
  8794     cpu_F1s = tcg_temp_new_i32();
       
  8795     cpu_F0d = tcg_temp_new_i64();
       
  8796     cpu_F1d = tcg_temp_new_i64();
       
  8797     cpu_V0 = cpu_F0d;
       
  8798     cpu_V1 = cpu_F1d;
       
  8799     /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
       
  8800     cpu_M0 = tcg_temp_new_i64();
       
  8801     next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
       
  8802     lj = -1;
       
  8803     num_insns = 0;
       
  8804     max_insns = tb->cflags & CF_COUNT_MASK;
       
  8805     if (max_insns == 0)
       
  8806         max_insns = CF_COUNT_MASK;
       
  8807 
       
  8808     gen_icount_start();
       
  8809     /* Reset the conditional execution bits immediately. This avoids
       
  8810        complications trying to do it at the end of the block.  */
       
  8811     if (env->condexec_bits)
       
  8812       {
       
  8813         TCGv tmp = new_tmp();
       
  8814         tcg_gen_movi_i32(tmp, 0);
       
  8815         store_cpu_field(tmp, condexec_bits);
       
  8816       }
       
  8817     do {
       
  8818 #ifdef CONFIG_USER_ONLY
       
  8819         /* Intercept jump to the magic kernel page.  */
       
  8820         if (dc->pc >= 0xffff0000) {
       
  8821             /* We always get here via a jump, so we know we are not in a
       
  8822                conditional execution block.  */
       
  8823             gen_exception(EXCP_KERNEL_TRAP);
       
  8824             dc->is_jmp = DISAS_UPDATE;
       
  8825             break;
       
  8826         }
       
  8827 #else
       
  8828         if (dc->pc >= 0xfffffff0 && IS_M(env)) {
       
  8829             /* We always get here via a jump, so we know we are not in a
       
  8830                conditional execution block.  */
       
  8831             gen_exception(EXCP_EXCEPTION_EXIT);
       
  8832             dc->is_jmp = DISAS_UPDATE;
       
  8833             break;
       
  8834         }
       
  8835 #endif
       
  8836 
       
  8837         if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
       
  8838             TAILQ_FOREACH(bp, &env->breakpoints, entry) {
       
  8839                 if (bp->pc == dc->pc) {
       
  8840                     gen_set_condexec(dc);
       
  8841                     gen_set_pc_im(dc->pc);
       
  8842                     gen_exception(EXCP_DEBUG);
       
  8843                     dc->is_jmp = DISAS_JUMP;
       
  8844                     /* Advance PC so that clearing the breakpoint will
       
  8845                        invalidate this TB.  */
       
  8846                     dc->pc += 2;
       
  8847                     goto done_generating;
       
  8848                     break;
       
  8849                 }
       
  8850             }
       
  8851         }
       
  8852         if (search_pc) {
       
  8853             j = gen_opc_ptr - gen_opc_buf;
       
  8854             if (lj < j) {
       
  8855                 lj++;
       
  8856                 while (lj < j)
       
  8857                     gen_opc_instr_start[lj++] = 0;
       
  8858             }
       
  8859             gen_opc_pc[lj] = dc->pc;
       
  8860             gen_opc_instr_start[lj] = 1;
       
  8861             gen_opc_icount[lj] = num_insns;
       
  8862         }
       
  8863 
       
  8864         if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
       
  8865             gen_io_start();
       
  8866 
       
  8867         if (env->thumb) {
       
  8868             disas_thumb_insn(env, dc);
       
  8869             if (dc->condexec_mask) {
       
  8870                 dc->condexec_cond = (dc->condexec_cond & 0xe)
       
  8871                                    | ((dc->condexec_mask >> 4) & 1);
       
  8872                 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
       
  8873                 if (dc->condexec_mask == 0) {
       
  8874                     dc->condexec_cond = 0;
       
  8875                 }
       
  8876             }
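                   /* Each insn inside an IT block shifts the 5-bit mask left
                      by one; the bit shifted out at the top becomes the low
                      condition bit for the next insn, and an all-zero mask
                      ends the block.  */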
       
  8877         } else {
       
  8878             disas_arm_insn(env, dc);
       
  8879         }
       
  8880         if (num_temps) {
       
  8881             fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
       
  8882             num_temps = 0;
       
  8883         }
       
  8884 
       
  8885         if (dc->condjmp && !dc->is_jmp) {
       
  8886             gen_set_label(dc->condlabel);
       
  8887             dc->condjmp = 0;
       
  8888         }
       
  8889         /* Translation stops when a conditional branch is encountered.
       
  8890          * Otherwise the subsequent code could get translated several times.
       
  8891          * Also stop translation when a page boundary is reached.  This
       
  8892          * ensures prefetch aborts occur at the right place.  */
       
  8893         num_insns ++;
       
  8894     } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
       
  8895              !env->singlestep_enabled &&
       
  8896              dc->pc < next_page_start &&
       
  8897              num_insns < max_insns);
       
  8898 
       
  8899     if (tb->cflags & CF_LAST_IO) {
       
  8900         if (dc->condjmp) {
       
  8901             /* FIXME:  This can theoretically happen with self-modifying
       
  8902                code.  */
       
  8903             cpu_abort(env, "IO on conditional branch instruction");
       
  8904         }
       
  8905         gen_io_end();
       
  8906     }
       
  8907 
       
  8908     /* At this stage dc->condjmp will only be set when the skipped
       
  8909        instruction was a conditional branch or trap, and the PC has
       
  8910        already been written.  */
       
  8911     if (unlikely(env->singlestep_enabled)) {
       
  8912         /* Make sure the pc is updated, and raise a debug exception.  */
       
  8913         if (dc->condjmp) {
       
  8914             gen_set_condexec(dc);
       
  8915             if (dc->is_jmp == DISAS_SWI) {
       
  8916                 gen_exception(EXCP_SWI);
       
  8917             } else {
       
  8918                 gen_exception(EXCP_DEBUG);
       
  8919             }
       
  8920             gen_set_label(dc->condlabel);
       
  8921         }
       
  8922         if (dc->condjmp || !dc->is_jmp) {
       
  8923             gen_set_pc_im(dc->pc);
       
  8924             dc->condjmp = 0;
       
  8925         }
       
  8926         gen_set_condexec(dc);
       
  8927         if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
       
  8928             gen_exception(EXCP_SWI);
       
  8929         } else {
       
  8930             /* FIXME: Single stepping a WFI insn will not halt
       
  8931                the CPU.  */
       
  8932             gen_exception(EXCP_DEBUG);
       
  8933         }
       
  8934     } else {
       
  8935         /* While branches must always occur at the end of an IT block,
       
  8936            there are a few other things that can cause us to terminate
       
  8937            the TB in the middle of an IT block:
       
  8938             - Exception generating instructions (bkpt, swi, undefined).
       
  8939             - Page boundaries.
       
  8940             - Hardware watchpoints.
       
  8941            Hardware breakpoints have already been handled and skip this code.
       
  8942          */
       
  8943         gen_set_condexec(dc);
       
  8944         switch (dc->is_jmp) {
       
  8945         case DISAS_NEXT:
       
  8946             gen_goto_tb(dc, 1, dc->pc);
       
  8947             break;
       
  8948         default:
       
  8949         case DISAS_JUMP:
       
  8950         case DISAS_UPDATE:
       
  8951             /* indicate that the hash table must be used to find the next TB */
       
  8952             tcg_gen_exit_tb(0);
       
  8953             break;
       
  8954         case DISAS_TB_JUMP:
       
  8955             /* nothing more to generate */
       
  8956             break;
       
  8957         case DISAS_WFI:
       
  8958             gen_helper_wfi();
       
  8959             break;
       
  8960         case DISAS_SWI:
       
  8961             gen_exception(EXCP_SWI);
       
  8962             break;
       
  8963         }
       
  8964         if (dc->condjmp) {
       
  8965             gen_set_label(dc->condlabel);
       
  8966             gen_set_condexec(dc);
       
  8967             gen_goto_tb(dc, 1, dc->pc);
       
  8968             dc->condjmp = 0;
       
  8969         }
       
  8970     }
       
  8971 
       
  8972 done_generating:
       
  8973     gen_icount_end(tb, num_insns);
       
  8974     *gen_opc_ptr = INDEX_op_end;
       
  8975 
       
  8976 #ifdef DEBUG_DISAS
       
  8977     if (loglevel & CPU_LOG_TB_IN_ASM) {
       
  8978         fprintf(logfile, "----------------\n");
       
  8979         fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
       
  8980         target_disas(logfile, pc_start, dc->pc - pc_start,
       
  8981                      env->thumb | (env->bswap_code << 1));
       
  8982         fprintf(logfile, "\n");
       
  8983     }
       
  8984 #endif
       
  8985     if (search_pc) {
       
  8986         j = gen_opc_ptr - gen_opc_buf;
       
  8987         lj++;
       
  8988         while (lj <= j)
       
  8989             gen_opc_instr_start[lj++] = 0;
       
  8990     } else {
       
  8991         tb->size = dc->pc - pc_start;
       
  8992         tb->icount = num_insns;
       
  8993     }
       
  8994 }
       
  8995 
       
  8996 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
       
  8997 {
       
  8998     gen_intermediate_code_internal(env, tb, 0);
       
  8999 }
       
  9000 
       
  9001 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
       
  9002 {
       
  9003     gen_intermediate_code_internal(env, tb, 1);
       
  9004 }
       
  9005 
       
  9006 static const char *cpu_mode_names[16] = {
       
  9007   "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
       
  9008   "???", "???", "???", "und", "???", "???", "???", "sys"
       
  9009 };
       
  9010 
       
  9011 void cpu_dump_state(CPUState *env, FILE *f,
       
  9012                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
       
  9013                     int flags)
       
  9014 {
       
  9015     int i;
       
  9016 #if 0
       
  9017     union {
       
  9018         uint32_t i;
       
  9019         float s;
       
  9020     } s0, s1;
       
  9021     CPU_DoubleU d;
       
  9022     /* ??? This assumes float64 and double have the same layout.
       
  9023        Oh well, it's only debug dumps.  */
       
  9024     union {
       
  9025         float64 f64;
       
  9026         double d;
       
  9027     } d0;
       
  9028 #endif
       
  9029     uint32_t psr;
       
  9030 
       
  9031     for (i = 0; i < 16; i++) {
       
  9032         cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
       
  9033         if ((i % 4) == 3)
       
  9034             cpu_fprintf(f, "\n");
       
  9035         else
       
  9036             cpu_fprintf(f, " ");
       
  9037     }
       
  9038     psr = cpsr_read(env);
       
  9039     cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
       
  9040                 psr,
       
  9041                 psr & (1 << 31) ? 'N' : '-',
       
  9042                 psr & (1 << 30) ? 'Z' : '-',
       
  9043                 psr & (1 << 29) ? 'C' : '-',
       
  9044                 psr & (1 << 28) ? 'V' : '-',
       
  9045                 psr & CPSR_T ? 'T' : 'A',
       
  9046                 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
       
  9047 
       
  9048 #if 0
       
  9049     for (i = 0; i < 16; i++) {
       
  9050         d.d = env->vfp.regs[i];
       
  9051         s0.i = d.l.lower;
       
  9052         s1.i = d.l.upper;
       
  9053         d0.f64 = d.d;
       
  9054         cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
       
  9055                     i * 2, (int)s0.i, s0.s,
       
  9056                     i * 2 + 1, (int)s1.i, s1.s,
       
  9057                     i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
       
  9058                     d0.d);
       
  9059     }
       
  9060     cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
       
  9061 #endif
       
  9062 }
       
  9063 
       
  9064 void gen_pc_load(CPUState *env, TranslationBlock *tb,
       
  9065                 unsigned long searched_pc, int pc_pos, void *puc)
       
  9066 {
       
  9067     env->regs[15] = gen_opc_pc[pc_pos];
       
  9068 }