symbian-qemu-0.9.1-12/qemu-symbian-svp/target-ppc/op_helper.c
/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

/*****************************************************************************/
/* Registers load and stores */
target_ulong helper_load_cr (void)
{
    return (env->crf[0] << 28) |
           (env->crf[1] << 24) |
           (env->crf[2] << 20) |
           (env->crf[3] << 16) |
           (env->crf[4] << 12) |
           (env->crf[5] << 8) |
           (env->crf[6] << 4) |
           (env->crf[7] << 0);
}

void helper_store_cr (target_ulong val, uint32_t mask)
{
    int i, sh;

    for (i = 0, sh = 7; i < 8; i++, sh--) {
        if (mask & (1 << sh))
            env->crf[i] = (val >> (sh * 4)) & 0xFUL;
    }
}
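/* Editor's note: each crf[i] above holds one 4-bit CR field, packed with CR0
 * in the most-significant nibble.  As an illustrative (not original) example,
 * helper_store_cr(0x20000000, 0x80) updates only CR0 (mask bit 7) and sets it
 * to 0x2, i.e. the EQ bit.
 */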
       
/*****************************************************************************/
/* SPR accesses */
void helper_load_dump_spr (uint32_t sprn)
{
    if (loglevel != 0) {
        fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
    }
}

void helper_store_dump_spr (uint32_t sprn)
{
    if (loglevel != 0) {
        fprintf(logfile, "Write SPR %d %03x <= " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
    }
}

target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}

#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        if (loglevel != 0) {
            fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
                    __func__, val & 0x8 ? 'l' : 'b', env->hflags);
        }
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif

/*****************************************************************************/
/* Memory load and stores */

static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
        if (!msr_sf)
            return (uint32_t)(addr + arg);
        else
#endif
            return addr + arg;
}
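/* Editor's note: on TARGET_PPC64 builds a clear MSR[SF] bit means the CPU is
 * in 32-bit mode, so the effective address computed above wraps modulo 2^32;
 * otherwise the addition is done at full register width.  This is a reading
 * of the code above, not a comment from the original source.
 */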
       
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}

void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
/* The PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
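/* Editor's illustration (hypothetical register numbers): with the XER byte
 * count (xer_bc) equal to 5 and reg = 5, helper_lsw fills r5 and the top byte
 * of r6; if ra were 6 that destination range overlaps ra, so the
 * spec-following branch above raises the invalid-operation program exception
 * instead of loading.
 */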
       
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}

static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve == addr)
        env->reserve = (target_ulong)-1ULL;
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}

void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line:
     * the PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}

// XXX: to be tested
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
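/* Editor's note: lscbx loads at most xer_bc bytes, packing them into
 * successive GPRs starting from the most-significant byte, and stops early
 * when a byte equal to the XER compare value (xer_cmp) is seen; the returned
 * value is the number of bytes actually transferred.  This paraphrases the
 * code above rather than the architecture manual.
 */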
       
/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif

target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif

/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}

#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif

target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
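/* Editor's note: popcntb is a per-byte population count, not a full popcount:
 * after the three masking steps each byte of the result holds the number of
 * bits set in the corresponding input byte (e.g. 0x0301 yields 0x0201).  The
 * example value is illustrative only.
 */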
       
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}

uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
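/* Editor's summary of the class codes produced above (derived from the code,
 * not from the original comments): 0x11 quiet NaN, 0x09 -infinity,
 * 0x05 +infinity, 0x12 -zero, 0x02 +zero, 0x18 -denormal, 0x14 +denormal,
 * 0x08 -normal, 0x04 +normal; the low four bits are returned as the FPCC.
 */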
       
/* Floating-point invalid operations exception */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
       
static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static always_inline void fpscr_set_rounding_mode (void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}

void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode();
}

void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}
       
#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif

/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
       
/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}

#endif

static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
       
  1264 /* fmadd - fmadd. */
       
  1265 uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
       
  1266 {
       
  1267     CPU_DoubleU farg1, farg2, farg3;
       
  1268 
       
  1269     farg1.ll = arg1;
       
  1270     farg2.ll = arg2;
       
  1271     farg3.ll = arg3;
       
  1272 #if USE_PRECISE_EMULATION
       
  1273     if (unlikely(float64_is_signaling_nan(farg1.d) ||
       
  1274                  float64_is_signaling_nan(farg2.d) ||
       
  1275                  float64_is_signaling_nan(farg3.d))) {
       
  1276         /* sNaN operation */
       
  1277         farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
       
  1278     } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
       
  1279                         (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
       
  1280         /* Multiplication of zero by infinity */
       
  1281         farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
       
  1282     } else {
       
  1283 #ifdef FLOAT128
       
  1284         /* This is the way the PowerPC specification defines it */
       
  1285         float128 ft0_128, ft1_128;
       
  1286 
       
  1287         ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
       
  1288         ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
       
  1289         ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
       
  1290         if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
       
  1291                      float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
       
  1292             /* Magnitude subtraction of infinities */
       
  1293             farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
       
  1294         } else {
       
  1295             ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
       
  1296             ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
       
  1297             farg1.d = float128_to_float64(ft0_128, &env->fp_status);
       
  1298         }
       
  1299 #else
       
  1300         /* This is OK on x86 hosts */
       
  1301         farg1.d = (farg1.d * farg2.d) + farg3.d;
       
  1302 #endif
       
  1303     }
       
  1304 #else
       
  1305     farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
       
  1306     farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
       
  1307 #endif
       
  1308     return farg1.ll;
       
  1309 }
       
  1310 
       
  1311 /* fmsub - fmsub. */
       
  1312 uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
       
  1313 {
       
  1314     CPU_DoubleU farg1, farg2, farg3;
       
  1315 
       
  1316     farg1.ll = arg1;
       
  1317     farg2.ll = arg2;
       
  1318     farg3.ll = arg3;
       
  1319 #if USE_PRECISE_EMULATION
       
  1320     if (unlikely(float64_is_signaling_nan(farg1.d) ||
       
  1321                  float64_is_signaling_nan(farg2.d) ||
       
  1322                  float64_is_signaling_nan(farg3.d))) {
       
  1323         /* sNaN operation */
       
  1324         farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
       
  1325     } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
       
  1326                         (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
       
  1327         /* Multiplication of zero by infinity */
       
  1328         farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
       
  1329     } else {
       
  1330 #ifdef FLOAT128
       
  1331         /* This is the way the PowerPC specification defines it */
       
  1332         float128 ft0_128, ft1_128;
       
  1333 
       
  1334         ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
       
  1335         ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
       
  1336         ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
       
  1337         if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
       
  1338                      float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
       
  1339             /* Magnitude subtraction of infinities */
       
  1340             farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
       
  1341         } else {
       
  1342             ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
       
  1343             ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
       
  1344             farg1.d = float128_to_float64(ft0_128, &env->fp_status);
       
  1345         }
       
  1346 #else
       
  1347         /* This is OK on x86 hosts */
       
  1348         farg1.d = (farg1.d * farg2.d) - farg3.d;
       
  1349 #endif
       
  1350     }
       
  1351 #else
       
  1352     farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
       
  1353     farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
       
  1354 #endif
       
  1355     return farg1.ll;
       
  1356 }
       
  1357 
       
  1358 /* fnmadd - fnmadd. */
       
  1359 uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
       
  1360 {
       
  1361     CPU_DoubleU farg1, farg2, farg3;
       
  1362 
       
  1363     farg1.ll = arg1;
       
  1364     farg2.ll = arg2;
       
  1365     farg3.ll = arg3;
       
  1366 
       
  1367     if (unlikely(float64_is_signaling_nan(farg1.d) ||
       
  1368                  float64_is_signaling_nan(farg2.d) ||
       
  1369                  float64_is_signaling_nan(farg3.d))) {
       
  1370         /* sNaN operation */
       
  1371         farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
       
  1372     } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
       
  1373                         (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
       
  1374         /* Multiplication of zero by infinity */
       
  1375         farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
       
  1376     } else {
       
  1377 #if USE_PRECISE_EMULATION
       
  1378 #ifdef FLOAT128
       
  1379         /* This is the way the PowerPC specification defines it */
       
  1380         float128 ft0_128, ft1_128;
       
  1381 
       
  1382         ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
       
  1383         ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
       
  1384         ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
       
  1385         if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
       
  1386                      float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
       
  1387             /* Magnitude subtraction of infinities */
       
  1388             farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
       
  1389         } else {
       
  1390             ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
       
  1391             ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
       
  1392             farg1.d = float128_to_float64(ft0_128, &env->fp_status);
       
  1393         }
       
  1394 #else
       
  1395         /* This is OK on x86 hosts */
       
  1396         farg1.d = (farg1.d * farg2.d) + farg3.d;
       
  1397 #endif
       
  1398 #else
       
  1399         farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
       
  1400         farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
       
  1401 #endif
       
  1402         if (likely(!float64_is_nan(farg1.d)))
       
  1403             farg1.d = float64_chs(farg1.d);
       
  1404     }
       
  1405     return farg1.ll;
       
  1406 }
       
  1407 
       
  1408 /* fnmsub - fnmsub. */
       
  1409 uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
       
  1410 {
       
  1411     CPU_DoubleU farg1, farg2, farg3;
       
  1412 
       
  1413     farg1.ll = arg1;
       
  1414     farg2.ll = arg2;
       
  1415     farg3.ll = arg3;
       
  1416 
       
  1417     if (unlikely(float64_is_signaling_nan(farg1.d) ||
       
  1418                  float64_is_signaling_nan(farg2.d) ||
       
  1419                  float64_is_signaling_nan(farg3.d))) {
       
  1420         /* sNaN operation */
       
  1421         farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
       
  1422     } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
       
  1423                         (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
       
  1424         /* Multiplication of zero by infinity */
       
  1425         farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
       
  1426     } else {
       
  1427 #if USE_PRECISE_EMULATION
       
  1428 #ifdef FLOAT128
       
  1429         /* This is the way the PowerPC specification defines it */
       
  1430         float128 ft0_128, ft1_128;
       
  1431 
       
  1432         ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
       
  1433         ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
       
  1434         ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
       
  1435         if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
       
  1436                      float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
       
  1437             /* Magnitude subtraction of infinities */
       
  1438             farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
       
  1439         } else {
       
  1440             ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
       
  1441             ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
       
  1442             farg1.d = float128_to_float64(ft0_128, &env->fp_status);
       
  1443         }
       
  1444 #else
       
  1445         /* This is OK on x86 hosts */
       
  1446         farg1.d = (farg1.d * farg2.d) - farg3.d;
       
  1447 #endif
       
  1448 #else
       
  1449         farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
       
  1450         farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
       
  1451 #endif
       
  1452         if (likely(!float64_is_nan(farg1.d)))
       
  1453             farg1.d = float64_chs(farg1.d);
       
  1454     }
       
  1455     return farg1.ll;
       
  1456 }
       
  1457 
       
  1458 /* frsp - frsp. */
       
  1459 uint64_t helper_frsp (uint64_t arg)
       
  1460 {
       
  1461     CPU_DoubleU farg;
       
  1462     float32 f32;
       
  1463     farg.ll = arg;
       
  1464 
       
  1465 #if USE_PRECISE_EMULATION
       
  1466     if (unlikely(float64_is_signaling_nan(farg.d))) {
       
   1467         /* sNaN conversion */
       
   1468         farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
       
   1469     } else {
       
   1470         f32 = float64_to_float32(farg.d, &env->fp_status);
       
   1471         farg.d = float32_to_float64(f32, &env->fp_status);
       
  1472     }
       
  1473 #else
       
  1474     f32 = float64_to_float32(farg.d, &env->fp_status);
       
  1475     farg.d = float32_to_float64(f32, &env->fp_status);
       
  1476 #endif
       
  1477     return farg.ll;
       
  1478 }
       
  1479 
       
  1480 /* fsqrt - fsqrt. */
       
  1481 uint64_t helper_fsqrt (uint64_t arg)
       
  1482 {
       
  1483     CPU_DoubleU farg;
       
  1484     farg.ll = arg;
       
  1485 
       
  1486     if (unlikely(float64_is_signaling_nan(farg.d))) {
       
  1487         /* sNaN square root */
       
  1488         farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
       
  1489     } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
       
  1490         /* Square root of a negative nonzero number */
       
  1491         farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
       
  1492     } else {
       
  1493         farg.d = float64_sqrt(farg.d, &env->fp_status);
       
  1494     }
       
  1495     return farg.ll;
       
  1496 }
       
  1497 
       
  1498 /* fre - fre. */
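        /* Note: fre, fres and frsqrte are architecturally "estimate"
         * instructions; the helpers below simply compute the exact result
         * (rounded to single precision for the 'es' forms) rather than
         * reproducing a hardware-style estimate.
         */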
       
  1499 uint64_t helper_fre (uint64_t arg)
       
  1500 {
       
  1501     CPU_DoubleU fone, farg;
       
  1502     fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
       
  1503     farg.ll = arg;
       
  1504 
       
  1505     if (unlikely(float64_is_signaling_nan(farg.d))) {
       
  1506         /* sNaN reciprocal */
       
  1507         farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
       
  1508     } else {
       
  1509         farg.d = float64_div(fone.d, farg.d, &env->fp_status);
       
  1510     }
       
   1511     return farg.ll;
       
  1512 }
       
  1513 
       
  1514 /* fres - fres. */
       
  1515 uint64_t helper_fres (uint64_t arg)
       
  1516 {
       
  1517     CPU_DoubleU fone, farg;
       
  1518     float32 f32;
       
  1519     fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
       
  1520     farg.ll = arg;
       
  1521 
       
  1522     if (unlikely(float64_is_signaling_nan(farg.d))) {
       
  1523         /* sNaN reciprocal */
       
  1524         farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
       
  1525     } else {
       
  1526         farg.d = float64_div(fone.d, farg.d, &env->fp_status);
       
  1527         f32 = float64_to_float32(farg.d, &env->fp_status);
       
  1528         farg.d = float32_to_float64(f32, &env->fp_status);
       
  1529     }
       
  1530     return farg.ll;
       
  1531 }
       
  1532 
       
  1533 /* frsqrte  - frsqrte. */
       
  1534 uint64_t helper_frsqrte (uint64_t arg)
       
  1535 {
       
  1536     CPU_DoubleU fone, farg;
       
  1537     float32 f32;
       
  1538     fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
       
  1539     farg.ll = arg;
       
  1540 
       
  1541     if (unlikely(float64_is_signaling_nan(farg.d))) {
       
  1542         /* sNaN reciprocal square root */
       
  1543         farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
       
  1544     } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
       
  1545         /* Reciprocal square root of a negative nonzero number */
       
  1546         farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
       
  1547     } else {
       
  1548         farg.d = float64_sqrt(farg.d, &env->fp_status);
       
  1549         farg.d = float64_div(fone.d, farg.d, &env->fp_status);
       
  1550         f32 = float64_to_float32(farg.d, &env->fp_status);
       
  1551         farg.d = float32_to_float64(f32, &env->fp_status);
       
  1552     }
       
  1553     return farg.ll;
       
  1554 }
       
  1555 
       
  1556 /* fsel - fsel. */
       
  1557 uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
       
  1558 {
       
  1559     CPU_DoubleU farg1;
       
  1560 
       
  1561     farg1.ll = arg1;
       
  1562 
       
  1563     if (!float64_is_neg(farg1.d) || float64_is_zero(farg1.d))
       
  1564         return arg2;
       
  1565     else
       
  1566         return arg3;
       
  1567 }
       
  1568 
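        /* fcmpu/fcmpo compute a 4-bit condition code:
         *   0x8 = FL (less than), 0x4 = FG (greater than),
         *   0x2 = FE (equal),     0x1 = FU (unordered, i.e. a NaN operand).
         * e.g. comparing 1.0 with 2.0 yields 0x8.  The value is written to
         * the FPCC part of FPSCR[FPRF] and to CR field crfD.
         */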
       
  1569 void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
       
  1570 {
       
  1571     CPU_DoubleU farg1, farg2;
       
  1572     uint32_t ret = 0;
       
  1573     farg1.ll = arg1;
       
  1574     farg2.ll = arg2;
       
  1575 
       
  1576     if (unlikely(float64_is_nan(farg1.d) ||
       
  1577                  float64_is_nan(farg2.d))) {
       
  1578         ret = 0x01UL;
       
  1579     } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
       
  1580         ret = 0x08UL;
       
  1581     } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
       
  1582         ret = 0x04UL;
       
  1583     } else {
       
  1584         ret = 0x02UL;
       
  1585     }
       
  1586 
       
  1587     env->fpscr &= ~(0x0F << FPSCR_FPRF);
       
  1588     env->fpscr |= ret << FPSCR_FPRF;
       
  1589     env->crf[crfD] = ret;
       
  1590     if (unlikely(ret == 0x01UL
       
  1591                  && (float64_is_signaling_nan(farg1.d) ||
       
  1592                      float64_is_signaling_nan(farg2.d)))) {
       
  1593         /* sNaN comparison */
       
  1594         fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
       
  1595     }
       
  1596 }
       
  1597 
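        /* fcmpo differs from fcmpu only in its invalid-operation handling:
         * an unordered result also raises VXVC (plus VXSNAN when a
         * signalling NaN is involved).
         */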
       
  1598 void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
       
  1599 {
       
  1600     CPU_DoubleU farg1, farg2;
       
  1601     uint32_t ret = 0;
       
  1602     farg1.ll = arg1;
       
  1603     farg2.ll = arg2;
       
  1604 
       
  1605     if (unlikely(float64_is_nan(farg1.d) ||
       
  1606                  float64_is_nan(farg2.d))) {
       
  1607         ret = 0x01UL;
       
  1608     } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
       
  1609         ret = 0x08UL;
       
  1610     } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
       
  1611         ret = 0x04UL;
       
  1612     } else {
       
  1613         ret = 0x02UL;
       
  1614     }
       
  1615 
       
  1616     env->fpscr &= ~(0x0F << FPSCR_FPRF);
       
  1617     env->fpscr |= ret << FPSCR_FPRF;
       
  1618     env->crf[crfD] = ret;
       
  1619     if (unlikely (ret == 0x01UL)) {
       
  1620         if (float64_is_signaling_nan(farg1.d) ||
       
  1621             float64_is_signaling_nan(farg2.d)) {
       
  1622             /* sNaN comparison */
       
  1623             fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
       
  1624                                   POWERPC_EXCP_FP_VXVC);
       
  1625         } else {
       
  1626             /* qNaN comparison */
       
  1627             fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
       
  1628         }
       
  1629     }
       
  1630 }
       
  1631 
       
  1632 #if !defined (CONFIG_USER_ONLY)
       
  1633 void helper_store_msr (target_ulong val)
       
  1634 {
       
  1635     val = hreg_store_msr(env, val, 0);
       
  1636     if (val != 0) {
       
  1637         env->interrupt_request |= CPU_INTERRUPT_EXITTB;
       
  1638         helper_raise_exception(val);
       
  1639     }
       
  1640 }
       
  1641 
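        /* Common return-from-interrupt tail: reload NIP and the (masked) MSR,
         * then request an exit from the current translation block so that the
         * new MSR context takes effect immediately.
         */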
       
  1642 static always_inline void do_rfi (target_ulong nip, target_ulong msr,
       
  1643                                     target_ulong msrm, int keep_msrh)
       
  1644 {
       
  1645 #if defined(TARGET_PPC64)
       
  1646     if (msr & (1ULL << MSR_SF)) {
       
  1647         nip = (uint64_t)nip;
       
  1648         msr &= (uint64_t)msrm;
       
  1649     } else {
       
  1650         nip = (uint32_t)nip;
       
  1651         msr = (uint32_t)(msr & msrm);
       
  1652         if (keep_msrh)
       
  1653             msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
       
  1654     }
       
  1655 #else
       
  1656     nip = (uint32_t)nip;
       
  1657     msr &= (uint32_t)msrm;
       
  1658 #endif
       
  1659     /* XXX: beware: this is false if VLE is supported */
       
  1660     env->nip = nip & ~((target_ulong)0x00000003);
       
  1661     hreg_store_msr(env, msr, 1);
       
  1662 #if defined (DEBUG_OP)
       
  1663     cpu_dump_rfi(env->nip, env->msr);
       
  1664 #endif
       
  1665     /* No need to raise an exception here,
       
  1666      * as rfi is always the last insn of a TB
       
  1667      */
       
  1668     env->interrupt_request |= CPU_INTERRUPT_EXITTB;
       
  1669 }
       
  1670 
       
  1671 void helper_rfi (void)
       
  1672 {
       
  1673     do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
       
  1674            ~((target_ulong)0xFFFF0000), 1);
       
  1675 }
       
  1676 
       
  1677 #if defined(TARGET_PPC64)
       
  1678 void helper_rfid (void)
       
  1679 {
       
  1680     do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
       
  1681            ~((target_ulong)0xFFFF0000), 0);
       
  1682 }
       
  1683 
       
  1684 void helper_hrfid (void)
       
  1685 {
       
  1686     do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
       
  1687            ~((target_ulong)0xFFFF0000), 0);
       
  1688 }
       
  1689 #endif
       
  1690 #endif
       
  1691 
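        /* tw/td trap when any condition selected by the TO field holds:
         *   0x10 = signed less than,   0x08 = signed greater than,
         *   0x04 = equal,              0x02 = unsigned less than,
         *   0x01 = unsigned greater than.
         * e.g. "tweq rA,rB" encodes TO = 0x04 and traps only on equality.
         */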
       
  1692 void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
       
  1693 {
       
  1694     if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
       
  1695                   ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
       
  1696                   ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
       
  1697                   ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
       
  1698                   ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
       
  1699         helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
       
  1700     }
       
  1701 }
       
  1702 
       
  1703 #if defined(TARGET_PPC64)
       
  1704 void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
       
  1705 {
       
  1706     if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
       
  1707                   ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
       
  1708                   ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
       
  1709                   ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
       
  1710                   ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
       
  1711         helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
       
  1712 }
       
  1713 #endif
       
  1714 
       
  1715 /*****************************************************************************/
       
  1716 /* PowerPC 601 specific instructions (POWER bridge) */
       
  1717 
       
  1718 target_ulong helper_clcs (uint32_t arg)
       
  1719 {
       
  1720     switch (arg) {
       
  1721     case 0x0CUL:
       
  1722         /* Instruction cache line size */
       
  1723         return env->icache_line_size;
       
  1724         break;
       
  1725     case 0x0DUL:
       
  1726         /* Data cache line size */
       
  1727         return env->dcache_line_size;
       
  1728         break;
       
  1729     case 0x0EUL:
       
  1730         /* Minimum cache line size */
       
  1731         return (env->icache_line_size < env->dcache_line_size) ?
       
  1732                 env->icache_line_size : env->dcache_line_size;
       
  1733         break;
       
  1734     case 0x0FUL:
       
  1735         /* Maximum cache line size */
       
  1736         return (env->icache_line_size > env->dcache_line_size) ?
       
  1737                 env->icache_line_size : env->dcache_line_size;
       
  1738         break;
       
  1739     default:
       
  1740         /* Undefined */
       
  1741         return 0;
       
  1742         break;
       
  1743     }
       
  1744 }
       
  1745 
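        /* POWER "div": divides the 64-bit quantity (rA || MQ) by rB, leaving
         * the remainder in MQ.  The overflow cases (rB = 0, or a low word of
         * 0x80000000 divided by -1) return INT32_MIN with MQ cleared.
         * e.g. rA = 0, MQ = 10, rB = 3 leaves MQ = 1 and returns 3.
         */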
       
  1746 target_ulong helper_div (target_ulong arg1, target_ulong arg2)
       
  1747 {
       
  1748     uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
       
  1749 
       
  1750     if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
       
  1751         (int32_t)arg2 == 0) {
       
  1752         env->spr[SPR_MQ] = 0;
       
  1753         return INT32_MIN;
       
  1754     } else {
       
  1755         env->spr[SPR_MQ] = tmp % arg2;
       
  1756         return  tmp / (int32_t)arg2;
       
  1757     }
       
  1758 }
       
  1759 
       
  1760 target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
       
  1761 {
       
  1762     uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
       
  1763 
       
  1764     if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
       
  1765         (int32_t)arg2 == 0) {
       
  1766         env->xer |= (1 << XER_OV) | (1 << XER_SO);
       
  1767         env->spr[SPR_MQ] = 0;
       
  1768         return INT32_MIN;
       
  1769     } else {
       
  1770         env->spr[SPR_MQ] = tmp % arg2;
       
  1771         tmp /= (int32_t)arg2;
       
   1772         if ((int32_t)tmp != tmp) {
       
  1773             env->xer |= (1 << XER_OV) | (1 << XER_SO);
       
  1774         } else {
       
  1775             env->xer &= ~(1 << XER_OV);
       
  1776         }
       
  1777         return tmp;
       
  1778     }
       
  1779 }
       
  1780 
       
  1781 target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
       
  1782 {
       
  1783     if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
       
  1784         (int32_t)arg2 == 0) {
       
  1785         env->spr[SPR_MQ] = 0;
       
  1786         return INT32_MIN;
       
  1787     } else {
       
  1788         env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
       
  1789         return (int32_t)arg1 / (int32_t)arg2;
       
  1790     }
       
  1791 }
       
  1792 
       
  1793 target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
       
  1794 {
       
  1795     if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
       
  1796         (int32_t)arg2 == 0) {
       
  1797         env->xer |= (1 << XER_OV) | (1 << XER_SO);
       
  1798         env->spr[SPR_MQ] = 0;
       
  1799         return INT32_MIN;
       
  1800     } else {
       
  1801         env->xer &= ~(1 << XER_OV);
       
  1802         env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
       
  1803         return (int32_t)arg1 / (int32_t)arg2;
       
  1804     }
       
  1805 }
       
  1806 
       
  1807 #if !defined (CONFIG_USER_ONLY)
       
  1808 target_ulong helper_rac (target_ulong addr)
       
  1809 {
       
  1810     mmu_ctx_t ctx;
       
  1811     int nb_BATs;
       
  1812     target_ulong ret = 0;
       
  1813 
       
  1814     /* We don't have to generate many instances of this instruction,
       
  1815      * as rac is supervisor only.
       
  1816      */
       
  1817     /* XXX: FIX THIS: Pretend we have no BAT */
       
  1818     nb_BATs = env->nb_BATs;
       
  1819     env->nb_BATs = 0;
       
  1820     if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
       
  1821         ret = ctx.raddr;
       
  1822     env->nb_BATs = nb_BATs;
       
  1823     return ret;
       
  1824 }
       
  1825 
       
  1826 void helper_rfsvc (void)
       
  1827 {
       
  1828     do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
       
  1829 }
       
  1830 #endif
       
  1831 
       
  1832 /*****************************************************************************/
       
  1833 /* 602 specific instructions */
       
   1834 /* mfrom is the craziest instruction ever seen, imho! */
       
  1835 /* Real implementation uses a ROM table. Do the same */
       
   1836 /* Extremely decomposed:
       
   1837  *
       
   1838  * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
       
  1839  */
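        /* A minimal sketch (not part of this helper) of how such a table
         * could be generated offline by a hypothetical stand-alone program,
         * assuming only <math.h> and <stdio.h>:
         *
         *   static void gen_mfrom_table (void)
         *   {
         *       int n;
         *       printf("static const uint32_t mfrom_ROM_table[602] = {\n");
         *       for (n = 0; n < 602; n++) {
         *           double d = 256.0 * log10(pow(10.0, -n / 256.0) + 1.0) + 0.5;
         *           printf("    %d,\n", (int)d);
         *       }
         *       printf("};\n");
         *   }
         */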
       
  1840 #if !defined (CONFIG_USER_ONLY)
       
  1841 target_ulong helper_602_mfrom (target_ulong arg)
       
  1842 {
       
  1843     if (likely(arg < 602)) {
       
  1844 #include "mfrom_table.c"
       
  1845         return mfrom_ROM_table[arg];
       
  1846     } else {
       
  1847         return 0;
       
  1848     }
       
  1849 }
       
  1850 #endif
       
  1851 
       
  1852 /*****************************************************************************/
       
  1853 /* Embedded PowerPC specific helpers */
       
  1854 
       
  1855 /* XXX: to be improved to check access rights when in user-mode */
       
  1856 target_ulong helper_load_dcr (target_ulong dcrn)
       
  1857 {
       
  1858     target_ulong val = 0;
       
  1859 
       
  1860     if (unlikely(env->dcr_env == NULL)) {
       
  1861         if (loglevel != 0) {
       
  1862             fprintf(logfile, "No DCR environment\n");
       
  1863         }
       
  1864         helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
       
  1865                                    POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
       
  1866     } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
       
  1867         if (loglevel != 0) {
       
  1868             fprintf(logfile, "DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
       
  1869         }
       
  1870         helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
       
  1871                                    POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
       
  1872     }
       
  1873     return val;
       
  1874 }
       
  1875 
       
  1876 void helper_store_dcr (target_ulong dcrn, target_ulong val)
       
  1877 {
       
  1878     if (unlikely(env->dcr_env == NULL)) {
       
  1879         if (loglevel != 0) {
       
  1880             fprintf(logfile, "No DCR environment\n");
       
  1881         }
       
  1882         helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
       
  1883                                    POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
       
  1884     } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
       
  1885         if (loglevel != 0) {
       
  1886             fprintf(logfile, "DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
       
  1887         }
       
  1888         helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
       
  1889                                    POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
       
  1890     }
       
  1891 }
       
  1892 
       
  1893 #if !defined(CONFIG_USER_ONLY)
       
  1894 void helper_40x_rfci (void)
       
  1895 {
       
  1896     do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
       
  1897            ~((target_ulong)0xFFFF0000), 0);
       
  1898 }
       
  1899 
       
  1900 void helper_rfci (void)
       
  1901 {
       
   1902     do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
       
  1903            ~((target_ulong)0x3FFF0000), 0);
       
  1904 }
       
  1905 
       
  1906 void helper_rfdi (void)
       
  1907 {
       
   1908     do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
       
  1909            ~((target_ulong)0x3FFF0000), 0);
       
  1910 }
       
  1911 
       
  1912 void helper_rfmci (void)
       
  1913 {
       
   1914     do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
       
  1915            ~((target_ulong)0x3FFF0000), 0);
       
  1916 }
       
  1917 #endif
       
  1918 
       
  1919 /* 440 specific */
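        /* dlmzb: determine the leftmost zero byte in the 8-byte string formed
         * by high:low.  The 1-based byte index is written to the low bits of
         * XER and returned; CR0 is optionally set to record whether the zero
         * byte was found in the high word, the low word, or not at all.
         * e.g. high = 0x41424300 gives a result of 4.
         */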
       
  1920 target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
       
  1921 {
       
  1922     target_ulong mask;
       
  1923     int i;
       
  1924 
       
  1925     i = 1;
       
  1926     for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
       
  1927         if ((high & mask) == 0) {
       
  1928             if (update_Rc) {
       
  1929                 env->crf[0] = 0x4;
       
  1930             }
       
  1931             goto done;
       
  1932         }
       
  1933         i++;
       
  1934     }
       
  1935     for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
       
  1936         if ((low & mask) == 0) {
       
  1937             if (update_Rc) {
       
  1938                 env->crf[0] = 0x8;
       
  1939             }
       
  1940             goto done;
       
  1941         }
       
  1942         i++;
       
  1943     }
       
  1944     if (update_Rc) {
       
  1945         env->crf[0] = 0x2;
       
  1946     }
       
  1947  done:
       
  1948     env->xer = (env->xer & ~0x7F) | i;
       
  1949     if (update_Rc) {
       
  1950         env->crf[0] |= xer_so;
       
  1951     }
       
  1952     return i;
       
  1953 }
       
  1954 
       
  1955 /* Altivec extension helpers.  */
       
  1956 /* FIXME: Crufty Altivec stuff that should probably be rewritten/removed.  */
       
  1957 #if 0
       
  1958 #if defined(WORDS_BIGENDIAN)
       
  1959 #define HI_IDX 0
       
  1960 #define LO_IDX 1
       
  1961 #else
       
  1962 #define HI_IDX 1
       
  1963 #define LO_IDX 0
       
  1964 #endif
       
  1965 
       
  1966 #define N_ELEMS(element) (sizeof (AVR0.element) / sizeof (AVR0.element[0]))
       
  1967 
       
  1968 #define VECTOR_FOR(element)                     \
       
  1969   int i;                                        \
       
  1970   VECTOR_FOR_I(i, element)
       
  1971 
       
  1972 #define VECTOR_FOR_I(index, element)                                    \
       
  1973   for (index = 0; index < N_ELEMS(element); index++)
       
  1974 
       
  1975 #if defined(WORDS_BIGENDIAN)
       
  1976 #define VECTOR_FOR_INORDER_I(index, element) VECTOR_FOR_I(index, element)
       
  1977 #else
       
  1978 #define VECTOR_FOR_INORDER_I(index, element)            \
       
  1979   for (index = N_ELEMS(element)-1; index >= 0; index--)
       
  1980 #endif
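        /* VECTOR_FOR_INORDER_I visits the elements in the target's
         * (big-endian) element order on any host: ascending on big-endian
         * hosts, descending on little-endian ones.
         */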
       
  1981 
       
  1982 /* Saturating arithmetic helpers.  */
       
  1983 #define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
       
  1984   static always_inline to_type cvt##from##to (from_type x, int *sat)    \
       
  1985   {                                                                     \
       
  1986     to_type r;                                                          \
       
  1987     if (use_min && x < min) {                                           \
       
  1988       r = min;                                                          \
       
  1989       *sat = 1;                                                         \
       
  1990     } else if (use_max && x > max) {                                    \
       
  1991       r = max;                                                          \
       
  1992       *sat = 1;                                                         \
       
  1993     } else {                                                            \
       
  1994       r = x;                                                            \
       
  1995     }                                                                   \
       
  1996     return r;                                                           \
       
  1997   }
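        /* The instantiations below define converters named cvt<from><to>,
         * e.g. cvtsdsw() saturates an int64_t to the int32_t range and sets
         * *sat on overflow: cvtsdsw(0x123456789LL, &sat) returns INT32_MAX.
         */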
       
  1998 SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
       
  1999 SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
       
  2000 SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
       
  2001 SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
       
  2002 SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
       
  2003 SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
       
  2004 SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
       
  2005 SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
       
  2006 SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
       
  2007 #undef SATCVT
       
  2008 
       
  2009 void do_lvsl (void)
       
  2010 {
       
  2011   int sh = (uint32_t)T0 & 0xf;
       
  2012   int i, j = sh;
       
  2013 
       
  2014   VECTOR_FOR_INORDER_I (i, u8) {
       
  2015     AVR0.u8[i] = j++;
       
  2016   }
       
  2017 }
       
  2018 
       
  2019 void do_lvsr (void)
       
  2020 {
       
  2021   int sh = (uint32_t)T0 & 0xf;
       
  2022   int i, j = 0x10-sh;
       
  2023 
       
  2024   VECTOR_FOR_INORDER_I (i, u8) {
       
  2025     AVR0.u8[i] = j++;
       
  2026   }
       
  2027 }
       
  2028 
       
  2029 void do_vaddcuw (void)
       
  2030 {
       
  2031   VECTOR_FOR(u32) {
       
  2032     AVR0.u32[i] = ~AVR0.u32[i] < AVR1.u32[i];
       
  2033   }
       
  2034 }
       
  2035 
       
  2036 #define VARITH_DO(name, op, element)            \
       
  2037   void do_v##name (void)                        \
       
  2038   {                                             \
       
  2039     VECTOR_FOR (element) {                                              \
       
  2040       AVR0.element[i] = AVR0.element[i] op AVR1.element[i];             \
       
  2041     }                                                                   \
       
  2042   }
       
  2043 #define VARITH(suffix, element)                 \
       
  2044   VARITH_DO(add##suffix, +, element)             \
       
  2045   VARITH_DO(sub##suffix, -, element)
       
  2046 VARITH(fp, f)
       
  2047 VARITH(ubm, u8)
       
  2048 VARITH(uhm, u16)
       
  2049 VARITH(uwm, u32)
       
  2050 #undef VARITH_DO
       
  2051 #undef VARITH
       
  2052 
       
  2053 #define VARITHSAT_CASE(type, op, min, max, use_min, use_max, element) \
       
  2054   {                                                                     \
       
  2055     type result = (type)AVR0.element[i] op (type)AVR1.element[i];       \
       
  2056     if (use_min && result < min) {                                      \
       
  2057       result = min;                                                     \
       
  2058       sat = 1;                                                          \
       
  2059     } else if (use_max && result > max) {                               \
       
  2060       result = max;                                                     \
       
  2061       sat = 1;                                                          \
       
  2062     }                                                                   \
       
  2063     AVR0.element[i] = result;                                           \
       
  2064   }
       
  2065 
       
  2066 #define VARITHSAT_DO(name, op, min, max, use_min, use_max, element)     \
       
  2067   void do_v##name (void)                                        \
       
  2068   {                                                             \
       
  2069     int sat = 0;                                                \
       
  2070     VECTOR_FOR (element) {                                      \
       
  2071       switch (sizeof(AVR0.element[0])) {                        \
       
  2072       case 1: VARITHSAT_CASE(int16_t, op, min, max, use_min, use_max, element); break; \
       
  2073       case 2: VARITHSAT_CASE(int32_t, op, min, max, use_min, use_max, element); break; \
       
  2074       case 4: VARITHSAT_CASE(int64_t, op, min, max, use_min, use_max, element); break; \
       
  2075       }                                                         \
       
  2076     }                                                           \
       
  2077     if (sat) {                                                  \
       
  2078       env->vscr |= (1 << VSCR_SAT);                             \
       
  2079     }                                                           \
       
  2080   }
       
  2081 #define VARITHSAT_SIGNED(suffix, element, min, max)             \
       
  2082   VARITHSAT_DO(adds##suffix##s, +, min, max, 1, 1, element)     \
       
  2083   VARITHSAT_DO(subs##suffix##s, -, min, max, 1, 1, element)
       
  2084 #define VARITHSAT_UNSIGNED(suffix, element, max)                 \
       
  2085   VARITHSAT_DO(addu##suffix##s, +, 0, max, 0, 1, element)        \
       
  2086   VARITHSAT_DO(subu##suffix##s, -, 0, max, 1, 0, element)
       
  2087 VARITHSAT_SIGNED(b, s8, INT8_MIN, INT8_MAX)
       
  2088 VARITHSAT_SIGNED(h, s16, INT16_MIN, INT16_MAX)
       
  2089 VARITHSAT_SIGNED(w, s32, INT32_MIN, INT32_MAX)
       
  2090 VARITHSAT_UNSIGNED(b, u8, UINT8_MAX)
       
  2091 VARITHSAT_UNSIGNED(h, u16, UINT16_MAX)
       
  2092 VARITHSAT_UNSIGNED(w, u32, UINT32_MAX)
       
  2093 #undef VARITHSAT_CASE
       
  2094 #undef VARITHSAT_DO
       
  2095 #undef VARITHSAT_SIGNED
       
  2096 #undef VARITHSAT_UNSIGNED
       
  2097 
       
  2098 #define VAVG_CASE(signedp, element, signed_type, unsigned_type)         \
       
  2099   if (signedp) {                                                        \
       
  2100     signed_type x = (signed_type)AVR0.element[i] + (signed_type)AVR1.element[i] + 1; \
       
  2101     AVR0.element[i] = x >> 1;                                           \
       
  2102   } else {                                                              \
       
  2103     unsigned_type x = (unsigned_type)AVR0.element[i] + (unsigned_type)AVR1.element[i] + 1; \
       
  2104     AVR0.element[i] = x >> 1;                                           \
       
  2105   }
       
  2106 
       
  2107 #define VAVG_DO(name, signedp, element)                                 \
       
  2108   void do_v##name (void)                                                \
       
  2109   {                                                                     \
       
  2110     VECTOR_FOR (element) {                                              \
       
  2111       switch (sizeof (AVR0.element[0])) {                               \
       
  2112       case 1: VAVG_CASE(signedp, element, int16_t, uint16_t); break;    \
       
  2113       case 2: VAVG_CASE(signedp, element, int32_t, uint32_t); break;    \
       
  2114       case 4: VAVG_CASE(signedp, element, int64_t, uint64_t); break;    \
       
  2115       }                                                                 \
       
  2116     }                                                                   \
       
  2117   }
       
  2118 
       
  2119 #define VAVG(type, signed_element, unsigned_element)    \
       
  2120   VAVG_DO(avgs##type, 1, signed_element)                \
       
  2121   VAVG_DO(avgu##type, 0, unsigned_element)
       
  2122 VAVG(b, s8, u8)
       
  2123 VAVG(h, s16, u16)
       
  2124 VAVG(w, s32, u32)
       
  2125 #undef VAVG_CASE
       
  2126 #undef VAVG_DO
       
  2127 #undef VAVG
       
  2128 
       
  2129 void do_vcmpbfp (void)
       
  2130 {
       
  2131   int all_in = 0;
       
  2132   VECTOR_FOR (f) {
       
  2133     int le = AVR0.f[i] <= AVR1.f[i];
       
  2134     int ge = AVR0.f[i] >= -AVR1.f[i];
       
  2135     AVR0.u32[i] = ((!le) << 31) | ((!ge) << 30);
       
  2136     all_in |= (!le | !ge);
       
  2137   }
       
  2138   T0 = (all_in == 0) << 1;
       
  2139 }
       
  2140 
       
  2141 void do_vcfsx (void)
       
  2142 {
       
  2143   uint32_t div = 1 << (uint32_t)T0;
       
  2144   VECTOR_FOR (f) {
       
  2145     AVR0.f[i] = (float)AVR1.s32[i] / div;
       
  2146   }
       
  2147 }
       
  2148 
       
  2149 void do_vcfux (void)
       
  2150 {
       
  2151   uint32_t div = 1 << (uint32_t)T0;
       
  2152   VECTOR_FOR (f) {
       
  2153     AVR0.f[i] = (float)AVR1.u32[i] / div;
       
  2154   }
       
  2155 }
       
  2156 
       
  2157 #define VCMP(suffix, compare, element)                                  \
       
  2158   void do_vcmp##suffix (void)                                           \
       
  2159   {                                                                     \
       
  2160     uint32_t ones = (sizeof (AVR0.element[0]) == 4                      \
       
  2161                      ? 0xffffffff                                       \
       
  2162                      : (sizeof (AVR0.element[0]) == 2                   \
       
  2163                         ? 0xffff                                        \
       
  2164                         : 0xff));                                       \
       
  2165     uint32_t all = ones;                                                \
       
  2166     uint32_t none = 0;                                                  \
       
  2167     VECTOR_FOR (element) {                                              \
       
  2168       uint32_t result = (AVR0.element[i] compare AVR1.element[i] ? ones : 0x0); \
       
  2169       switch (sizeof (AVR0.element[0])) {                               \
       
  2170       case 4: AVR0.u32[i] = result; break;                              \
       
  2171       case 2: AVR0.u16[i] = result; break;                              \
       
  2172       case 1: AVR0.u8[i] = result; break;                               \
       
  2173       }                                                                 \
       
  2174       all &= result;                                                    \
       
  2175       none |= result;                                                   \
       
  2176     }                                                                   \
       
  2177     T0 = ((all != 0) << 3) | ((none == 0) << 1);                        \
       
  2178   }
       
  2179 VCMP(eqfp, ==, f)
       
  2180 VCMP(gefp, >=, f)
       
  2181 VCMP(gtfp, >, f)
       
  2182 VCMP(equb, ==, u8)
       
  2183 VCMP(equh, ==, u16)
       
  2184 VCMP(equw, ==, u32)
       
  2185 VCMP(gtub, >, u8)
       
  2186 VCMP(gtuh, >, u16)
       
  2187 VCMP(gtuw, >, u32)
       
  2188 VCMP(gtsb, >, s8)
       
  2189 VCMP(gtsh, >, s16)
       
  2190 VCMP(gtsw, >, s32)
       
  2191 #undef VCMP
       
  2192 
       
  2193 #define VCT(suffix, element, cvt, min, max)     \
       
  2194   void do_vct##suffix (void)                    \
       
  2195   {                                             \
       
  2196     uint32_t uimm = (uint32_t)T0 & 0x1f;                        \
       
  2197     int sat = 0;                                                \
       
  2198     VECTOR_FOR (f) {                                            \
       
  2199       uint32_t fi = AVR1.u32[i];                                \
       
  2200       int exponent = (fi >> 23) & 0xff;                         \
       
  2201       if (exponent == 255 || (exponent + uimm) <= 254) {        \
       
  2202         double prod = ldexp((double)AVR1.f[i], (int)uimm);      \
       
  2203         int64_t x = (int64_t)prod;                              \
       
  2204         AVR0.element[i] = cvt(x, &sat);                         \
       
  2205       } else {                                                  \
       
  2206         if (fi >> 31) {                                         \
       
  2207           AVR0.element[i] = min;                                \
       
  2208         } else {                                                \
       
  2209           AVR0.element[i] = max;                                \
       
  2210         }                                                       \
       
  2211         sat = 1;                                                \
       
  2212       }                                                         \
       
  2213     }                                                           \
       
  2214     if (sat) {                                                  \
       
  2215       env->vscr |= (1 << VSCR_SAT);                             \
       
  2216     }                                                           \
       
  2217   }
       
  2218 VCT(sxs, s32, cvtsdsw, INT32_MIN, INT32_MAX)
       
  2219 VCT(uxs, u32, cvtsduw, 0, UINT32_MAX)
       
  2220 #undef VCT
       
  2221 
       
  2222 void do_vexptefp (void)
       
  2223 {
       
  2224   /* FIXME: need to properly handle special inputs */
       
  2225   VECTOR_FOR (f) {
       
  2226     AVR0.f[i] = powf((float)2.0, AVR1.f[i]);
       
  2227   }
       
  2228 }
       
  2229 
       
  2230 void do_vlogefp (void)
       
  2231 {
       
  2232   VECTOR_FOR (f) {
       
  2233     AVR0.f[i] = logf(AVR1.f[i])/logf((float)2.0);
       
  2234   }
       
  2235 }
       
  2236 
       
  2237 void do_vmaddfp (void)
       
  2238 {
       
  2239   VECTOR_FOR (f) {
       
  2240     AVR0.f[i] = AVR0.f[i] * AVR2.f[i] + AVR1.f[i];
       
  2241   }
       
  2242 }
       
  2243 
       
  2244 #define VMINMAX_DO(name, compare, element)                              \
       
  2245   void do_v##name (void)                                                \
       
  2246   {                                                                     \
       
  2247     VECTOR_FOR (element) {                                              \
       
  2248       if (AVR0.element[i] compare AVR1.element[i]) {                    \
       
  2249         AVR0.element[i] = AVR1.element[i];                              \
       
  2250       }                                                                 \
       
  2251     }                                                                   \
       
  2252   }
       
  2253 #define VMINMAX(suffix, element)                \
       
  2254   VMINMAX_DO(min##suffix, >, element)           \
       
  2255   VMINMAX_DO(max##suffix, <, element)
       
  2256 VMINMAX(fp, f)
       
  2257 VMINMAX(sb, s8)
       
  2258 VMINMAX(sh, s16)
       
  2259 VMINMAX(sw, s32)
       
  2260 VMINMAX(ub, u8)
       
  2261 VMINMAX(uh, u16)
       
  2262 VMINMAX(uw, u32)
       
  2263 #undef VMINMAX_DO
       
  2264 #undef VMINMAX
       
  2265 
       
  2266 void do_vmhaddshs (void)
       
  2267 {
       
  2268   int sat = 0;
       
  2269 
       
  2270   VECTOR_FOR (s16) {
       
  2271     int32_t prod = AVR0.s16[i] * AVR1.s16[i];
       
  2272     int32_t t = (int32_t)AVR2.s16[i] + (prod >> 15);
       
  2273     AVR0.s16[i] = cvtswsh (t, &sat);
       
  2274   }
       
  2275 
       
  2276   if (sat) {
       
  2277     env->vscr |= (1 << VSCR_SAT);
       
  2278   }
       
  2279 }
       
  2280 
       
  2281 void do_vmhraddshs (void)
       
  2282 {
       
  2283   int sat = 0;
       
  2284 
       
  2285   VECTOR_FOR (s16) {
       
  2286     int32_t prod = AVR0.s16[i] * AVR1.s16[i] + 0x00004000;
       
  2287     int32_t t = (int32_t)AVR2.s16[i] + (prod >> 15);
       
  2288     AVR0.s16[i] = cvtswsh (t, &sat);
       
  2289   }
       
  2290 
       
  2291   if (sat) {
       
  2292     env->vscr |= (1 << VSCR_SAT);
       
  2293   }
       
  2294 }
       
  2295 
       
  2296 void do_vmladduhm (void)
       
  2297 {
       
  2298   VECTOR_FOR (s16) {
       
  2299     int32_t prod = AVR0.s16[i] * AVR1.s16[i];
       
  2300     AVR0.s16[i] = (int16_t) (prod + AVR2.s16[i]);
       
  2301   }
       
  2302 }
       
  2303 
       
  2304 #define VMRG_DO(name, element, highp)           \
       
  2305   void do_v##name (void)                        \
       
  2306   {                                             \
       
  2307     ppc_avr_t result;                           \
       
  2308     int i;                                      \
       
  2309     size_t n_elems = N_ELEMS(element);                                  \
       
  2310     for (i = 0; i < n_elems/2; i++) {                                   \
       
  2311       if (highp) {                                                      \
       
  2312         result.element[i*2+HI_IDX] = AVR0.element[i];                   \
       
  2313         result.element[i*2+LO_IDX] = AVR1.element[i];                   \
       
  2314       } else {                                                          \
       
  2315         result.element[n_elems - i*2 - (1+HI_IDX)] = AVR1.element[n_elems - i - 1]; \
       
  2316         result.element[n_elems - i*2 - (1+LO_IDX)] = AVR0.element[n_elems - i - 1]; \
       
  2317       }                                                                 \
       
  2318     }                                                                   \
       
  2319     AVR0 = result;                                                      \
       
  2320   }
       
  2321 #if defined(WORDS_BIGENDIAN)
       
  2322 #define MRGHI 0
       
   2323 #define MRGLO 1
       
  2324 #else
       
  2325 #define MRGHI 1
       
  2326 #define MRGLO 0
       
  2327 #endif
       
  2328 #define VMRG(suffix, element)                   \
       
  2329   VMRG_DO(mrgl##suffix, element, MRGHI)         \
       
  2330   VMRG_DO(mrgh##suffix, element, MRGLO)
       
  2331 VMRG(b, u8)
       
  2332 VMRG(h, u16)
       
  2333 VMRG(w, u32)
       
  2334 #undef VMRG_DO
       
  2335 #undef VMRG
       
  2336 
       
  2337 void do_vmsummbm (void)
       
  2338 {
       
  2339   int32_t prod[16];
       
  2340   int i;
       
  2341 
       
  2342   VECTOR_FOR_I(i, s8) {
       
  2343     prod[i] = (int32_t)AVR0.s8[i] * AVR1.u8[i];
       
  2344   }
       
  2345 
       
  2346   VECTOR_FOR_INORDER_I(i, s32) {
       
  2347     AVR0.s32[i] = AVR2.s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
       
  2348   }
       
  2349 }
       
  2350 
       
  2351 void do_vmsumshm (void)
       
  2352 {
       
  2353   int32_t prod[8];
       
  2354   int i;
       
  2355 
       
  2356   VECTOR_FOR_I(i, s16) {
       
  2357     prod[i] = AVR0.s16[i] * AVR1.s16[i];
       
  2358   }
       
  2359 
       
  2360   VECTOR_FOR_INORDER_I(i, s32) {
       
  2361     AVR0.s32[i] = AVR2.s32[i] + prod[2*i] + prod[2*i+1];
       
  2362   }
       
  2363 }
       
  2364 
       
  2365 void do_vmsumshs (void)
       
  2366 {
       
  2367   int32_t prod[8];
       
  2368   int i;
       
  2369   int sat = 0;
       
  2370 
       
  2371   VECTOR_FOR_I (i, s16) {
       
  2372     prod[i] = (int32_t)AVR0.s16[i] * AVR1.s16[i];
       
  2373   }
       
  2374 
       
  2375   VECTOR_FOR_INORDER_I (i, s32) {
       
  2376     int64_t t = (int64_t)AVR2.s32[i] + prod[2*i] + prod[2*i+1];
       
  2377     AVR0.u32[i] = cvtsdsw(t, &sat);
       
  2378   }
       
  2379 
       
  2380   if (sat) {
       
  2381     env->vscr |= (1 << VSCR_SAT);
       
  2382   }
       
  2383 }
       
  2384 
       
  2385 void do_vmsumubm (void)
       
  2386 {
       
  2387   uint16_t prod[16];
       
  2388   int i;
       
  2389 
       
  2390   VECTOR_FOR_I(i, u8) {
       
  2391     prod[i] = AVR0.u8[i] * AVR1.u8[i];
       
  2392   }
       
  2393 
       
  2394   VECTOR_FOR_INORDER_I(i, u32) {
       
  2395     AVR0.u32[i] = AVR2.u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
       
  2396   }
       
  2397 }
       
  2398 
       
  2399 void do_vmsumuhm (void)
       
  2400 {
       
  2401   uint32_t prod[8];
       
  2402   int i;
       
  2403 
       
  2404   VECTOR_FOR_I(i, u16) {
       
  2405     prod[i] = AVR0.u16[i] * AVR1.u16[i];
       
  2406   }
       
  2407 
       
  2408   VECTOR_FOR_INORDER_I(i, u32) {
       
  2409     AVR0.u32[i] = AVR2.u32[i] + prod[2*i] + prod[2*i+1];
       
  2410   }
       
  2411 }
       
  2412 
       
  2413 void do_vmsumuhs (void)
       
  2414 {
       
  2415   uint32_t prod[8];
       
  2416   int i;
       
  2417   int sat = 0;
       
  2418 
       
  2419   VECTOR_FOR_I (i, u16) {
       
  2420     prod[i] = AVR0.u16[i] * AVR1.u16[i];
       
  2421   }
       
  2422 
       
  2423   VECTOR_FOR_INORDER_I (i, s32) {
       
  2424     uint64_t t = (uint64_t)AVR2.u32[i] + prod[2*i] + prod[2*i+1];
       
  2425     AVR0.u32[i] = cvtuduw(t, &sat);
       
  2426   }
       
  2427 
       
  2428   if (sat) {
       
  2429     env->vscr |= (1 << VSCR_SAT);
       
  2430   }
       
  2431 }
       
  2432 
       
  2433 #define VMUL_DO(name, mul_element, prod_element, evenp) \
       
  2434   void do_v##name (void)                                \
       
  2435   {                                                     \
       
  2436     int i;                                              \
       
  2437     VECTOR_FOR_INORDER_I(i, prod_element) {                             \
       
  2438       if (evenp) {                                                      \
       
  2439         AVR0.prod_element[i] = AVR0.mul_element[i*2+HI_IDX] * AVR1.mul_element[i*2+HI_IDX]; \
       
  2440       } else {                                                          \
       
  2441         AVR0.prod_element[i] = AVR0.mul_element[i*2+LO_IDX] * AVR1.mul_element[i*2+LO_IDX]; \
       
  2442       }                                                                 \
       
  2443     }                                                                   \
       
  2444   }
       
  2445 #define VMUL(suffix, mul_element, prod_element) \
       
  2446   VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
       
  2447   VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
       
  2448 VMUL(sb, s8, s16)
       
  2449 VMUL(sh, s16, s32)
       
  2450 VMUL(ub, u8, u16)
       
  2451 VMUL(uh, u16, u32)
       
  2452 #undef VMUL_DO
       
  2453 #undef VMUL
       
  2454 
       
  2455 void do_vnmsubfp (void)
       
  2456 {
       
  2457   VECTOR_FOR (f) {
       
  2458     AVR0.f[i] = -(AVR0.f[i] * AVR2.f[i] - AVR1.f[i]);
       
  2459   }
       
  2460 }
       
  2461 
       
  2462 void do_vperm (void)
       
  2463 {
       
  2464   ppc_avr_t result;
       
  2465   int i;
       
  2466   VECTOR_FOR_INORDER_I (i, u8) {
       
  2467     int s = AVR2.u8[i] & 0x1f;
       
  2468 #if defined(WORDS_BIGENDIAN)
       
  2469     int index = s & 0xf;
       
  2470 #else
       
  2471     int index = 15 - (s & 0xf);
       
  2472 #endif
       
  2473     if (s & 0x10) {
       
  2474       result.u8[i] = AVR1.u8[index];
       
  2475     } else {
       
  2476       result.u8[i] = AVR0.u8[index];
       
  2477     }
       
  2478   }
       
  2479   AVR0 = result;
       
  2480 }
       
  2481 
       
  2482 #if defined(WORDS_BIGENDIAN)
       
  2483 #define PKBIG 1
       
  2484 #else
       
  2485 #define PKBIG 0
       
  2486 #endif
       
  2487 void do_vpkpx (void)
       
  2488 {
       
  2489   int i, j;
       
  2490   ppc_avr_t result;
       
  2491 #if defined(WORDS_BIGENDIAN)
       
  2492   ppc_avr_t x[2] = { AVR0, AVR1 };
       
  2493 #else
       
  2494   ppc_avr_t x[2] = { AVR1, AVR0 };
       
  2495 #endif
       
  2496 
       
  2497   VECTOR_FOR_INORDER_I (i, u64) {
       
  2498     VECTOR_FOR_INORDER_I (j, u32){
       
  2499       uint32_t e = x[i].u32[j];
       
  2500       result.u16[4*i+j] = ((e >> 9) & 0xfc00) | ((e >> 6) & 0x3e0) | ((e >> 3) & 0x1f);
       
  2501     }
       
  2502   }
       
  2503   AVR0 = result;
       
  2504 }
       
  2505 
       
  2506 #define VPK(suffix, from, to, cvt, dosat)       \
       
  2507   void do_vpk##suffix (void)                    \
       
  2508   {                                             \
       
  2509     int i;                                      \
       
  2510     int sat = 0;                                \
       
  2511     ppc_avr_t result;                           \
       
  2512     ppc_avr_t *a0 = PKBIG ? &AVR0 : &AVR1;      \
       
  2513     ppc_avr_t *a1 = PKBIG ? &AVR1 : &AVR0;      \
       
  2514     VECTOR_FOR_INORDER_I (i, from) {            \
       
  2515       result.to[i] = cvt(a0->from[i], &sat);                \
       
  2516       result.to[i+N_ELEMS(from)] = cvt(a1->from[i], &sat);  \
       
  2517     }                                                   \
       
  2518     AVR0 = result;                                      \
       
  2519     if (dosat && sat) {                                 \
       
  2520       env->vscr |= (1 << VSCR_SAT);                     \
       
  2521     }                                                   \
       
  2522   }
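        /* I() below is an identity "conversion" used for the modulo
         * (non-saturating) pack variants, which simply truncate.
         */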
       
  2523 #define I(x, y) (x)
       
  2524 VPK(shss, s16, s8, cvtshsb, 1)
       
  2525 VPK(shus, s16, u8, cvtshub, 1)
       
  2526 VPK(swss, s32, s16, cvtswsh, 1)
       
  2527 VPK(swus, s32, u16, cvtswuh, 1)
       
  2528 VPK(uhus, u16, u8, cvtuhub, 1)
       
  2529 VPK(uwus, u32, u16, cvtuwuh, 1)
       
  2530 VPK(uhum, u16, u8, I, 0)
       
  2531 VPK(uwum, u32, u16, I, 0)
       
  2532 #undef I
       
  2533 #undef VPK
       
  2534 #undef PKBIG
       
  2535 
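       /* The estimate and round-to-integer helpers below use plain host
        * float operations (1/x, sqrtf, floorf/rintf/ceilf/truncf) rather
        * than softfloat, so results follow the host's arithmetic and
        * rounding. */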
       
  2536 void do_vrefp (void)
       
  2537 {
       
  2538   VECTOR_FOR (f) {
       
  2539     AVR0.f[i] = 1/AVR1.f[i];
       
  2540   }
       
  2541 }
       
  2542 
       
  2543 #define VRFI(suffix, func)                      \
       
  2544   void do_vrfi##suffix (void)                   \
       
  2545   {                                             \
       
  2546     VECTOR_FOR (f) {                            \
       
  2547       AVR0.f[i] = func (AVR1.f[i]);             \
       
  2548     }                                           \
       
  2549   }
       
  2550 VRFI(m, floorf)
       
  2551 VRFI(n, rintf)
       
  2552 VRFI(p, ceilf)
       
  2553 VRFI(z, truncf)
       
  2554 #undef VRFI
       
  2555 
       
  2556 #define VROTATE(suffix, element)                \
       
  2557   void do_vrl##suffix (void)                    \
       
  2558   {                                             \
       
  2559     VECTOR_FOR (element) {                      \
       
  2560       unsigned int mask = ((1 << (3 + (sizeof (AVR0.element[0]) >> 1))) - 1); \
       
  2561       unsigned int shift = AVR1.element[i] & mask;                      \
       
  2562       AVR0.element[i] = (AVR0.element[i] << shift) | (AVR0.element[i] >> (sizeof(AVR0.element[0]) * 8 - shift)); \
       
  2563     }                                                                   \
       
  2564   }
       
  2565 VROTATE(b, u8)
       
  2566 VROTATE(h, u16)
       
  2567 VROTATE(w, u32)
       
  2568 #undef VROTATE
       
  2569 
       
  2570 void do_vrsqrtefp (void)
       
  2571 {
       
  2572   VECTOR_FOR (f) {
       
  2573     AVR0.f[i] = 1/sqrtf(AVR1.f[i]);
       
  2574   }
       
  2575 }
       
  2576 
       
  2577 #if defined(WORDS_BIGENDIAN)
       
  2578 #define LEFT 0
       
  2579 #define RIGHT 1
       
  2580 #else
       
  2581 #define LEFT 1
       
  2582 #define RIGHT 0
       
  2583 #endif
       
  2584 #define VSHIFT(suffix, leftp)                   \
       
  2585   void do_vs##suffix (void)                     \
       
  2586   {                                             \
       
  2587     int shift = AVR1.u8[LO_IDX*0xf] & 0x7;      \
       
  2588     int doit = 1;                               \
       
  2589     VECTOR_FOR (u8) {                           \
       
  2590       doit = doit && ((AVR1.u8[i] & 0x7) == shift);     \
       
  2591     }                                           \
       
  2592     if (doit) {                                 \
       
  2593       if (shift == 0) {                         \
       
  2594         return;                                 \
       
  2595       } else if (leftp) {                                               \
       
  2596         uint64_t carry = AVR0.u64[LO_IDX] >> (64 - shift);              \
       
  2597         AVR0.u64[HI_IDX] = (AVR0.u64[HI_IDX] << shift) | carry;         \
       
  2598         AVR0.u64[LO_IDX] <<= shift;                                     \
       
  2599       } else {                                                          \
       
  2600         uint64_t carry = AVR0.u64[HI_IDX] << (64 - shift);              \
       
  2601         AVR0.u64[LO_IDX] = (AVR0.u64[LO_IDX] >> shift) | carry;         \
       
  2602         AVR0.u64[HI_IDX] >>= shift;                                     \
       
  2603       }                                                                 \
       
  2604     }                                                                   \
       
  2605   }
       
  2606 VSHIFT(l, LEFT)
       
  2607 VSHIFT(r, RIGHT)
       
  2608 #undef VSHIFT
       
  2609 #undef LEFT
       
  2610 #undef RIGHT
       
  2611 
       
  2612 #define VSL(suffix, element)                    \
       
  2613   void do_vsl##suffix (void)                    \
       
  2614   {                                             \
       
  2615     VECTOR_FOR (element) {                      \
       
  2616       unsigned int mask = ((1 << (3 + (sizeof (AVR0.element[0]) >> 1))) - 1); \
       
  2617       unsigned int shift = AVR1.element[i] & mask;                      \
       
  2618       AVR0.element[i] = AVR0.element[i] << shift;                       \
       
  2619     }                                                                   \
       
  2620   }
       
  2621 VSL(b, u8)
       
  2622 VSL(h, u16)
       
  2623 VSL(w, u32)
       
  2624 #undef VSL
       
  2625 
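       /* do_vsldoi: shifts the 32-byte concatenation AVR0:AVR1 left by
        * T0 & 0xf bytes and keeps the upper 16 bytes. */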
       
  2626 void do_vsldoi (void)
       
  2627 {
       
  2628   int sh = (int)T0 & 0xf;
       
  2629   int i;
       
  2630   ppc_avr_t result;
       
  2631 
       
  2632 #if defined(WORDS_BIGENDIAN)
       
  2633   VECTOR_FOR_I (i, u8) {
       
  2634     int index = sh + i;
       
  2635     if (index > 0xf) {
       
  2636       result.u8[i] = AVR1.u8[index-0x10];
       
  2637     } else {
       
  2638       result.u8[i] = AVR0.u8[index];
       
  2639     }
       
  2640   }
       
  2641 #else
       
  2642   VECTOR_FOR_I (i, u8) {
       
  2643     int index = (16 - sh) + i;
       
  2644     if (index > 0xf) {
       
  2645       result.u8[i] = AVR0.u8[index-0x10];
       
  2646     } else {
       
  2647       result.u8[i] = AVR1.u8[index];
       
  2648     }
       
  2649   }
       
  2650 #endif
       
  2651   AVR0 = result;
       
  2652 }
       
  2653 
       
  2654 void do_vslo (void)
       
  2655 {
       
  2656   int sh = (AVR1.u8[LO_IDX*0xf] >> 3) & 0xf;
       
  2657 
       
  2658 #if defined (WORDS_BIGENDIAN)
       
  2659   memmove (&AVR0.u8[0], &AVR0.u8[sh], 0x10-sh);
       
  2660   memset (&AVR0.u8[16-sh], 0, sh);
       
  2661 #else
       
  2662   memmove (&AVR0.u8[sh], &AVR0.u8[0], 0x10-sh);
       
  2663   memset (&AVR0.u8[0], 0, sh);
       
  2664 #endif
       
  2665 }
       
  2666 
       
  2667 /* Experimental testing shows that hardware masks the immediate.  */
       
  2668 #define _SPLAT_MASKED(element) ((uint32_t)T0 & (N_ELEMS(element) - 1))
       
  2669 #if defined(WORDS_BIGENDIAN)
       
  2670 #define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
       
  2671 #else
       
  2672 #define SPLAT_ELEMENT(element) (N_ELEMS(element)-1 - _SPLAT_MASKED(element))
       
  2673 #endif
       
  2674 #define VSPLT(suffix, element)                          \
       
  2675   void do_vsplt##suffix (void)                          \
       
  2676   {                                                     \
       
  2677     uint32_t s = AVR1.element[SPLAT_ELEMENT(element)];  \
       
  2678     VECTOR_FOR (element) {                              \
       
  2679       AVR0.element[i] = s;                              \
       
  2680     }                                                   \
       
  2681   }
       
  2682 VSPLT(b, u8)
       
  2683 VSPLT(h, u16)
       
  2684 VSPLT(w, u32)
       
  2685 #undef VSPLT
       
  2686 #undef SPLAT_ELEMENT
       
  2687 #undef _SPLAT_MASKED
       
  2688 
       
  2689 #define VSPLTI(suffix, element, splat_type)     \
       
  2690   void do_vspltis##suffix (void)                \
       
  2691   {                                             \
       
  2692     splat_type x = (splat_type)T0;              \
       
  2693     /* 5-bit sign extension.  */                \
       
  2694     if (x & 0x10)                               \
       
  2695       x -= 0x20;                                \
       
  2696     VECTOR_FOR (element) {                      \
       
  2697       AVR0.element[i] = x;                      \
       
  2698     }                                           \
       
  2699   }
       
  2700 VSPLTI(b, s8, int8_t)
       
  2701 VSPLTI(h, s16, int16_t)
       
  2702 VSPLTI(w, s32, int32_t)
       
  2703 #undef VSPLTI
       
  2704 
       
  2705 #define VSR(suffix, element)                    \
       
  2706   void do_vsr##suffix (void)                    \
       
  2707   {                                             \
       
  2708     VECTOR_FOR (element) {                      \
       
  2709       unsigned int mask = ((1 << (3 + (sizeof (AVR0.element[0]) >> 1))) - 1); \
       
  2710       unsigned int shift = AVR1.element[i] & mask;                      \
       
  2711       AVR0.element[i] = AVR0.element[i] >> shift;                       \
       
  2712     }                                                                   \
       
  2713   }
       
  2714 VSR(ab, s8)
       
  2715 VSR(ah, s16)
       
  2716 VSR(aw, s32)
       
  2717 VSR(b, u8)
       
  2718 VSR(h, u16)
       
  2719 VSR(w, u32)
       
  2720 #undef VSR
       
  2721 
       
  2722 void do_vsro (void)
       
  2723 {
       
  2724   int sh = (AVR1.u8[LO_IDX*0xf] >> 3) & 0xf;
       
  2725 
       
  2726 #if defined (WORDS_BIGENDIAN)
       
  2727   memmove (&AVR0.u8[sh], &AVR0.u8[0], 0x10-sh);
       
  2728   memset (&AVR0.u8[0], 0, sh);
       
  2729 #else
       
  2730   memmove (&AVR0.u8[0], &AVR0.u8[sh], 0x10-sh);
       
  2731   memset (&AVR0.u8[0x10-sh], 0, sh);
       
  2732 #endif
       
  2733 }
       
  2734 
       
  2735 void do_vsubcuw (void)
       
  2736 {
       
  2737   VECTOR_FOR(u32) {
       
  2738     AVR0.u32[i] = AVR0.u32[i] >= AVR1.u32[i];
       
  2739   }
       
  2740 }
       
  2741 
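       /* do_vsumsws: adds all four signed words of AVR0 to word 3 of
        * AVR1, writes the 32-bit saturated sum to that word of the result
        * and zeroes the rest; VSCR[SAT] is set if the sum saturated. */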
       
  2742 void do_vsumsws (void)
       
  2743 {
       
  2744   int64_t t;
       
  2745   int i, upper;
       
  2746   ppc_avr_t result;
       
  2747   int sat = 0;
       
  2748 
       
  2749 #if defined(WORDS_BIGENDIAN)
       
  2750   upper = N_ELEMS(s32)-1;
       
  2751 #else
       
  2752   upper = 0;
       
  2753 #endif
       
  2754   t = (int64_t)AVR1.s32[upper];
       
  2755   VECTOR_FOR_I (i, s32) {
       
  2756     t += AVR0.s32[i];
       
  2757     result.s32[i] = 0;
       
  2758   }
       
  2759   result.s32[upper] = cvtsdsw(t, &sat);
       
  2760   AVR0 = result;
       
  2761 
       
  2762   if (sat) {
       
  2763     env->vscr |= (1 << VSCR_SAT);
       
  2764   }
       
  2765 }
       
  2766 
       
  2767 void do_vsum2sws (void)
       
  2768 {
       
  2769   int i, j, upper;
       
  2770   ppc_avr_t result;
       
  2771   int sat = 0;
       
  2772 
       
  2773 #if defined(WORDS_BIGENDIAN)
       
  2774   upper = 1;
       
  2775 #else
       
  2776   upper = 0;
       
  2777 #endif
       
  2778   VECTOR_FOR_I (i, u64) {
       
  2779     int64_t t = (int64_t)AVR1.s32[upper+i*2];
       
  2780     result.u64[i] = 0;
       
  2781     VECTOR_FOR_I (j, u64) {
       
  2782       t += AVR0.s32[2*i+j];
       
  2783     }
       
  2784     result.s32[upper+i*2] = cvtsdsw(t, &sat);
       
  2785   }
       
  2786 
       
  2787   AVR0 = result;
       
  2788   if (sat) {
       
  2789     env->vscr |= (1 << VSCR_SAT);
       
  2790   }
       
  2791 }
       
  2792 
       
  2793 void do_vsum4sbs (void)
       
  2794 {
       
  2795   int i, j;
       
  2796   int sat = 0;
       
  2797 
       
  2798   VECTOR_FOR_I (i, s32) {
       
  2799     int64_t t = (int64_t)AVR1.s32[i];
       
  2800     VECTOR_FOR_I (j, s32) {
       
  2801       t += AVR0.s8[4*i+j];
       
  2802     }
       
  2803     AVR0.s32[i] = cvtsdsw(t, &sat);
       
  2804   }
       
  2805 
       
  2806   if (sat) {
       
  2807     env->vscr |= (1 << VSCR_SAT);
       
  2808   }
       
  2809 }
       
  2810 
       
  2811 void do_vsum4shs (void)
       
  2812 {
       
  2813   int sat = 0;
       
  2814 
       
  2815   VECTOR_FOR (s32) {
       
  2816     int64_t t = (int64_t)AVR1.s32[i];
       
  2817     t += AVR0.s16[2*i] + AVR0.s16[2*i+1];
       
  2818     AVR0.s32[i] = cvtsdsw(t, &sat);
       
  2819   }
       
  2820 
       
  2821   if (sat) {
       
  2822     env->vscr |= (1 << VSCR_SAT);
       
  2823   }
       
  2824 }
       
  2825 
       
  2826 void do_vsum4ubs (void)
       
  2827 {
       
  2828   int i, j;
       
  2829   int sat = 0;
       
  2830 
       
  2831   VECTOR_FOR_I (i, u32) {
       
  2832     uint64_t t = (uint64_t)AVR1.u32[i];
       
  2833     VECTOR_FOR_I (j, u32) {
       
  2834       t += AVR0.u8[4*i+j];
       
  2835     }
       
  2836     AVR0.u32[i] = cvtuduw(t, &sat);
       
  2837   }
       
  2838 
       
  2839   if (sat) {
       
  2840     env->vscr |= (1 << VSCR_SAT);
       
  2841   }
       
  2842 }
       
  2843 
       
  2844 #if defined(WORDS_BIGENDIAN)
       
  2845 #define UPKHI 1
       
  2846 #define UPKLO 0
       
  2847 #else
       
  2848 #define UPKHI 0
       
  2849 #define UPKLO 1
       
  2850 #endif
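       /* VUPKPX: unpacks four 1:5:5:5 pixels from one half of AVR1
        * (selected by 'hi') into 32-bit pixels; the 1-bit field widens to
        * 0x00 or 0xff and each 5-bit channel is placed, unscaled, in the
        * low bits of its byte. */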
       
  2851 #define VUPKPX(suffix, hi)                      \
       
  2852   void do_vupk##suffix (void)                   \
       
  2853   {                                             \
       
  2854     int i;                                      \
       
  2855     VECTOR_FOR_I (i, u32) {                     \
       
  2856       uint16_t e = AVR1.u16[hi ? i : i+4];      \
       
  2857       uint8_t a = (e >> 15) ? 0xff : 0;         \
       
  2858       uint8_t r = (e >> 10) & 0x1f;             \
       
  2859       uint8_t g = (e >> 5) & 0x1f;                              \
       
  2860       uint8_t b = e & 0x1f;                                     \
       
  2861       AVR0.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
       
  2862     }                                                           \
       
  2863   }
       
  2864 VUPKPX(lpx, UPKLO)
       
  2865 VUPKPX(hpx, UPKHI)
       
  2866 
       
  2867 #define VUPK(suffix, unpacked, packee, hi)      \
       
  2868   void do_vupk##suffix (void)                   \
       
  2869   {                                             \
       
  2870     int i;                                      \
       
  2871     ppc_avr_t result;                                                   \
       
  2872     if (hi) {                                                           \
       
  2873       for (i = 0; i < N_ELEMS(unpacked); i++) {                         \
       
  2874         result.unpacked[i] = AVR1.packee[i];                            \
       
  2875       }                                                                 \
       
  2876     } else {                                                            \
       
  2877       for (i = N_ELEMS(unpacked); i < N_ELEMS(packee); i++) {           \
       
  2878         result.unpacked[i-N_ELEMS(unpacked)] = AVR1.packee[i];          \
       
  2879       }                                                                 \
       
  2880     }                                                                   \
       
  2881     AVR0 = result;                                                      \
       
  2882   }
       
  2883 VUPK(hsb, s16, s8, UPKHI)
       
  2884 VUPK(hsh, s32, s16, UPKHI)
       
  2885 VUPK(lsb, s16, s8, UPKLO)
       
  2886 VUPK(lsh, s32, s16, UPKLO)
       
  2887 #undef VUPK
       
  2888 #undef UPKHI
       
  2889 #undef UPKLO
       
  2890 
       
  2891 #undef VECTOR_FOR
       
  2892 #undef VECTOR_FOR_I
       
  2893 #undef VECTOR_FOR_INORDER_I
       
  2894 #undef HI_IDX
       
  2895 #undef LO_IDX
       
  2896 #endif
       
  2897 
       
  2898 /*****************************************************************************/
       
  2899 /* SPE extension helpers */
       
  2900 /* Use a table to make this quicker */
       
  2901 static uint8_t hbrev[16] = {
       
  2902     0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
       
  2903     0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
       
  2904 };
       
  2905 
       
  2906 static always_inline uint8_t byte_reverse (uint8_t val)
       
  2907 {
       
  2908     return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
       
  2909 }
       
  2910 
       
  2911 static always_inline uint32_t word_reverse (uint32_t val)
       
  2912 {
       
  2913     return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
       
  2914         (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
       
  2915 }
       
  2916 
       
  2917 #define MASKBITS 16 // Arbitrary value - to be fixed (implementation dependent)
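       /* helper_brinc: bit-reversed increment of the low MASKBITS bits of
        * arg1 under the mask arg2.  Forcing the bits outside the mask to 1
        * (a | ~b) lets the +1 carry propagate across them once the word is
        * bit-reversed; d & b then keeps only the masked bits. */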
       
  2918 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
       
  2919 {
       
  2920     uint32_t a, b, d, mask;
       
  2921 
       
  2922     mask = UINT32_MAX >> (32 - MASKBITS);
       
  2923     a = arg1 & mask;
       
  2924     b = arg2 & mask;
       
  2925     d = word_reverse(1 + word_reverse(a | ~b));
       
  2926     return (arg1 & ~mask) | (d & b);
       
  2927 }
       
  2928 
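       /* cntlsw32 counts leading copies of the sign bit (leading ones for
        * negative values, leading zeros otherwise); cntlzw32 is a plain
        * count of leading zeros. */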
       
  2929 uint32_t helper_cntlsw32 (uint32_t val)
       
  2930 {
       
  2931     if (val & 0x80000000)
       
  2932         return clz32(~val);
       
  2933     else
       
  2934         return clz32(val);
       
  2935 }
       
  2936 
       
  2937 uint32_t helper_cntlzw32 (uint32_t val)
       
  2938 {
       
  2939     return clz32(val);
       
  2940 }
       
  2941 
       
  2942 /* Single-precision floating-point conversions */
       
  2943 static always_inline uint32_t efscfsi (uint32_t val)
       
  2944 {
       
  2945     CPU_FloatU u;
       
  2946 
       
  2947     u.f = int32_to_float32(val, &env->spe_status);
       
  2948 
       
  2949     return u.l;
       
  2950 }
       
  2951 
       
  2952 static always_inline uint32_t efscfui (uint32_t val)
       
  2953 {
       
  2954     CPU_FloatU u;
       
  2955 
       
  2956     u.f = uint32_to_float32(val, &env->spe_status);
       
  2957 
       
  2958     return u.l;
       
  2959 }
       
  2960 
       
  2961 static always_inline int32_t efsctsi (uint32_t val)
       
  2962 {
       
  2963     CPU_FloatU u;
       
  2964 
       
  2965     u.l = val;
       
  2966     /* NaNs are not treated the way IEEE 754 requires */
       
  2967     if (unlikely(float32_is_nan(u.f)))
       
  2968         return 0;
       
  2969 
       
  2970     return float32_to_int32(u.f, &env->spe_status);
       
  2971 }
       
  2972 
       
  2973 static always_inline uint32_t efsctui (uint32_t val)
       
  2974 {
       
  2975     CPU_FloatU u;
       
  2976 
       
  2977     u.l = val;
       
  2978     /* NaNs are not treated the way IEEE 754 requires */
       
  2979     if (unlikely(float32_is_nan(u.f)))
       
  2980         return 0;
       
  2981 
       
  2982     return float32_to_uint32(u.f, &env->spe_status);
       
  2983 }
       
  2984 
       
  2985 static always_inline uint32_t efsctsiz (uint32_t val)
       
  2986 {
       
  2987     CPU_FloatU u;
       
  2988 
       
  2989     u.l = val;
       
  2990     /* NaNs are not treated the way IEEE 754 requires */
       
  2991     if (unlikely(float32_is_nan(u.f)))
       
  2992         return 0;
       
  2993 
       
  2994     return float32_to_int32_round_to_zero(u.f, &env->spe_status);
       
  2995 }
       
  2996 
       
  2997 static always_inline uint32_t efsctuiz (uint32_t val)
       
  2998 {
       
  2999     CPU_FloatU u;
       
  3000 
       
  3001     u.l = val;
       
  3002     /* NaNs are not treated the way IEEE 754 requires */
       
  3003     if (unlikely(float32_is_nan(u.f)))
       
  3004         return 0;
       
  3005 
       
  3006     return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
       
  3007 }
       
  3008 
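       /* The SPE fractional formats are handled here as the integer value
        * scaled by 2^-32: converting from a fraction divides by 2^32, and
        * converting to a fraction multiplies by 2^32 before the integer
        * conversion. */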
       
  3009 static always_inline uint32_t efscfsf (uint32_t val)
       
  3010 {
       
  3011     CPU_FloatU u;
       
  3012     float32 tmp;
       
  3013 
       
  3014     u.f = int32_to_float32(val, &env->spe_status);
       
  3015     tmp = int64_to_float32(1ULL << 32, &env->spe_status);
       
  3016     u.f = float32_div(u.f, tmp, &env->spe_status);
       
  3017 
       
  3018     return u.l;
       
  3019 }
       
  3020 
       
  3021 static always_inline uint32_t efscfuf (uint32_t val)
       
  3022 {
       
  3023     CPU_FloatU u;
       
  3024     float32 tmp;
       
  3025 
       
  3026     u.f = uint32_to_float32(val, &env->spe_status);
       
  3027     tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
       
  3028     u.f = float32_div(u.f, tmp, &env->spe_status);
       
  3029 
       
  3030     return u.l;
       
  3031 }
       
  3032 
       
  3033 static always_inline uint32_t efsctsf (uint32_t val)
       
  3034 {
       
  3035     CPU_FloatU u;
       
  3036     float32 tmp;
       
  3037 
       
  3038     u.l = val;
       
  3039     /* NaNs are not treated the way IEEE 754 requires */
       
  3040     if (unlikely(float32_is_nan(u.f)))
       
  3041         return 0;
       
  3042     tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
       
  3043     u.f = float32_mul(u.f, tmp, &env->spe_status);
       
  3044 
       
  3045     return float32_to_int32(u.f, &env->spe_status);
       
  3046 }
       
  3047 
       
  3048 static always_inline uint32_t efsctuf (uint32_t val)
       
  3049 {
       
  3050     CPU_FloatU u;
       
  3051     float32 tmp;
       
  3052 
       
  3053     u.l = val;
       
  3054     /* NaNs are not treated the way IEEE 754 requires */
       
  3055     if (unlikely(float32_is_nan(u.f)))
       
  3056         return 0;
       
  3057     tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
       
  3058     u.f = float32_mul(u.f, tmp, &env->spe_status);
       
  3059 
       
  3060     return float32_to_uint32(u.f, &env->spe_status);
       
  3061 }
       
  3062 
       
  3063 #define HELPER_SPE_SINGLE_CONV(name)                                          \
       
  3064 uint32_t helper_e##name (uint32_t val)                                        \
       
  3065 {                                                                             \
       
  3066     return e##name(val);                                                      \
       
  3067 }
       
  3068 /* efscfsi */
       
  3069 HELPER_SPE_SINGLE_CONV(fscfsi);
       
  3070 /* efscfui */
       
  3071 HELPER_SPE_SINGLE_CONV(fscfui);
       
  3072 /* efscfuf */
       
  3073 HELPER_SPE_SINGLE_CONV(fscfuf);
       
  3074 /* efscfsf */
       
  3075 HELPER_SPE_SINGLE_CONV(fscfsf);
       
  3076 /* efsctsi */
       
  3077 HELPER_SPE_SINGLE_CONV(fsctsi);
       
  3078 /* efsctui */
       
  3079 HELPER_SPE_SINGLE_CONV(fsctui);
       
  3080 /* efsctsiz */
       
  3081 HELPER_SPE_SINGLE_CONV(fsctsiz);
       
  3082 /* efsctuiz */
       
  3083 HELPER_SPE_SINGLE_CONV(fsctuiz);
       
  3084 /* efsctsf */
       
  3085 HELPER_SPE_SINGLE_CONV(fsctsf);
       
  3086 /* efsctuf */
       
  3087 HELPER_SPE_SINGLE_CONV(fsctuf);
       
  3088 
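       /* The evfs* vector forms apply the scalar conversion independently
        * to the upper and lower 32-bit halves of the 64-bit operand. */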
       
  3089 #define HELPER_SPE_VECTOR_CONV(name)                                          \
       
  3090 uint64_t helper_ev##name (uint64_t val)                                       \
       
  3091 {                                                                             \
       
  3092     return ((uint64_t)e##name(val >> 32) << 32) |                             \
       
  3093             (uint64_t)e##name(val);                                           \
       
  3094 }
       
  3095 /* evfscfsi */
       
  3096 HELPER_SPE_VECTOR_CONV(fscfsi);
       
  3097 /* evfscfui */
       
  3098 HELPER_SPE_VECTOR_CONV(fscfui);
       
  3099 /* evfscfuf */
       
  3100 HELPER_SPE_VECTOR_CONV(fscfuf);
       
  3101 /* evfscfsf */
       
  3102 HELPER_SPE_VECTOR_CONV(fscfsf);
       
  3103 /* evfsctsi */
       
  3104 HELPER_SPE_VECTOR_CONV(fsctsi);
       
  3105 /* evfsctui */
       
  3106 HELPER_SPE_VECTOR_CONV(fsctui);
       
  3107 /* evfsctsiz */
       
  3108 HELPER_SPE_VECTOR_CONV(fsctsiz);
       
  3109 /* evfsctuiz */
       
  3110 HELPER_SPE_VECTOR_CONV(fsctuiz);
       
  3111 /* evfsctsf */
       
  3112 HELPER_SPE_VECTOR_CONV(fsctsf);
       
  3113 /* evfsctuf */
       
  3114 HELPER_SPE_VECTOR_CONV(fsctuf);
       
  3115 
       
  3116 /* Single-precision floating-point arithmetic */
       
  3117 static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
       
  3118 {
       
  3119     CPU_FloatU u1, u2;
       
  3120     u1.l = op1;
       
  3121     u2.l = op2;
       
  3122     u1.f = float32_add(u1.f, u2.f, &env->spe_status);
       
  3123     return u1.l;
       
  3124 }
       
  3125 
       
  3126 static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
       
  3127 {
       
  3128     CPU_FloatU u1, u2;
       
  3129     u1.l = op1;
       
  3130     u2.l = op2;
       
  3131     u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
       
  3132     return u1.l;
       
  3133 }
       
  3134 
       
  3135 static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
       
  3136 {
       
  3137     CPU_FloatU u1, u2;
       
  3138     u1.l = op1;
       
  3139     u2.l = op2;
       
  3140     u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
       
  3141     return u1.l;
       
  3142 }
       
  3143 
       
  3144 static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
       
  3145 {
       
  3146     CPU_FloatU u1, u2;
       
  3147     u1.l = op1;
       
  3148     u2.l = op2;
       
  3149     u1.f = float32_div(u1.f, u2.f, &env->spe_status);
       
  3150     return u1.l;
       
  3151 }
       
  3152 
       
  3153 #define HELPER_SPE_SINGLE_ARITH(name)                                         \
       
  3154 uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
       
  3155 {                                                                             \
       
  3156     return e##name(op1, op2);                                                 \
       
  3157 }
       
  3158 /* efsadd */
       
  3159 HELPER_SPE_SINGLE_ARITH(fsadd);
       
  3160 /* efssub */
       
  3161 HELPER_SPE_SINGLE_ARITH(fssub);
       
  3162 /* efsmul */
       
  3163 HELPER_SPE_SINGLE_ARITH(fsmul);
       
  3164 /* efsdiv */
       
  3165 HELPER_SPE_SINGLE_ARITH(fsdiv);
       
  3166 
       
  3167 #define HELPER_SPE_VECTOR_ARITH(name)                                         \
       
  3168 uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
       
  3169 {                                                                             \
       
  3170     return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
       
  3171             (uint64_t)e##name(op1, op2);                                      \
       
  3172 }
       
  3173 /* evfsadd */
       
  3174 HELPER_SPE_VECTOR_ARITH(fsadd);
       
  3175 /* evfssub */
       
  3176 HELPER_SPE_VECTOR_ARITH(fssub);
       
  3177 /* evfsmul */
       
  3178 HELPER_SPE_VECTOR_ARITH(fsmul);
       
  3179 /* evfsdiv */
       
  3180 HELPER_SPE_VECTOR_ARITH(fsdiv);
       
  3181 
       
  3182 /* Single-precision floating-point comparisons */
       
  3183 static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
       
  3184 {
       
  3185     CPU_FloatU u1, u2;
       
  3186     u1.l = op1;
       
  3187     u2.l = op2;
       
  3188     return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
       
  3189 }
       
  3190 
       
  3191 static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
       
  3192 {
       
  3193     CPU_FloatU u1, u2;
       
  3194     u1.l = op1;
       
  3195     u2.l = op2;
       
  3196     return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
       
  3197 }
       
  3198 
       
  3199 static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
       
  3200 {
       
  3201     CPU_FloatU u1, u2;
       
  3202     u1.l = op1;
       
  3203     u2.l = op2;
       
  3204     return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
       
  3205 }
       
  3206 
       
  3207 static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
       
  3208 {
       
  3209     /* XXX: TODO: test special values (NaN, infinities, ...) */
       
  3210     return efststlt(op1, op2);
       
  3211 }
       
  3212 
       
  3213 static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
       
  3214 {
       
  3215     /* XXX: TODO: test special values (NaN, infinities, ...) */
       
  3216     return efststgt(op1, op2);
       
  3217 }
       
  3218 
       
  3219 static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
       
  3220 {
       
  3221     /* XXX: TODO: test special values (NaN, infinities, ...) */
       
  3222     return efststeq(op1, op2);
       
  3223 }
       
  3224 
       
  3225 #define HELPER_SINGLE_SPE_CMP(name)                                           \
       
  3226 uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
       
  3227 {                                                                             \
       
  3228     return e##name(op1, op2) << 2;                                            \
       
  3229 }
       
  3230 /* efststlt */
       
  3231 HELPER_SINGLE_SPE_CMP(fststlt);
       
  3232 /* efststgt */
       
  3233 HELPER_SINGLE_SPE_CMP(fststgt);
       
  3234 /* efststeq */
       
  3235 HELPER_SINGLE_SPE_CMP(fststeq);
       
  3236 /* efscmplt */
       
  3237 HELPER_SINGLE_SPE_CMP(fscmplt);
       
  3238 /* efscmpgt */
       
  3239 HELPER_SINGLE_SPE_CMP(fscmpgt);
       
  3240 /* efscmpeq */
       
  3241 HELPER_SINGLE_SPE_CMP(fscmpeq);
       
  3242 
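       /* evcmp_merge combines the compare results of the upper (t0) and
        * lower (t1) halves into a single mask: t0, t1, their OR and their
        * AND, from the most to the least significant bit position. */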
       
  3243 static always_inline uint32_t evcmp_merge (int t0, int t1)
       
  3244 {
       
  3245     return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
       
  3246 }
       
  3247 
       
  3248 #define HELPER_VECTOR_SPE_CMP(name)                                           \
       
  3249 uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
       
  3250 {                                                                             \
       
  3251     return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
       
  3252 }
       
  3253 /* evfststlt */
       
  3254 HELPER_VECTOR_SPE_CMP(fststlt);
       
  3255 /* evfststgt */
       
  3256 HELPER_VECTOR_SPE_CMP(fststgt);
       
  3257 /* evfststeq */
       
  3258 HELPER_VECTOR_SPE_CMP(fststeq);
       
  3259 /* evfscmplt */
       
  3260 HELPER_VECTOR_SPE_CMP(fscmplt);
       
  3261 /* evfscmpgt */
       
  3262 HELPER_VECTOR_SPE_CMP(fscmpgt);
       
  3263 /* evfscmpeq */
       
  3264 HELPER_VECTOR_SPE_CMP(fscmpeq);
       
  3265 
       
  3266 /* Double-precision floating-point conversion */
       
  3267 uint64_t helper_efdcfsi (uint32_t val)
       
  3268 {
       
  3269     CPU_DoubleU u;
       
  3270 
       
  3271     u.d = int32_to_float64(val, &env->spe_status);
       
  3272 
       
  3273     return u.ll;
       
  3274 }
       
  3275 
       
  3276 uint64_t helper_efdcfsid (uint64_t val)
       
  3277 {
       
  3278     CPU_DoubleU u;
       
  3279 
       
  3280     u.d = int64_to_float64(val, &env->spe_status);
       
  3281 
       
  3282     return u.ll;
       
  3283 }
       
  3284 
       
  3285 uint64_t helper_efdcfui (uint32_t val)
       
  3286 {
       
  3287     CPU_DoubleU u;
       
  3288 
       
  3289     u.d = uint32_to_float64(val, &env->spe_status);
       
  3290 
       
  3291     return u.ll;
       
  3292 }
       
  3293 
       
  3294 uint64_t helper_efdcfuid (uint64_t val)
       
  3295 {
       
  3296     CPU_DoubleU u;
       
  3297 
       
  3298     u.d = uint64_to_float64(val, &env->spe_status);
       
  3299 
       
  3300     return u.ll;
       
  3301 }
       
  3302 
       
  3303 uint32_t helper_efdctsi (uint64_t val)
       
  3304 {
       
  3305     CPU_DoubleU u;
       
  3306 
       
  3307     u.ll = val;
       
  3308     /* NaNs are not treated the way IEEE 754 requires */
       
  3309     if (unlikely(float64_is_nan(u.d)))
       
  3310         return 0;
       
  3311 
       
  3312     return float64_to_int32(u.d, &env->spe_status);
       
  3313 }
       
  3314 
       
  3315 uint32_t helper_efdctui (uint64_t val)
       
  3316 {
       
  3317     CPU_DoubleU u;
       
  3318 
       
  3319     u.ll = val;
       
  3320     /* NaNs are not treated the way IEEE 754 requires */
       
  3321     if (unlikely(float64_is_nan(u.d)))
       
  3322         return 0;
       
  3323 
       
  3324     return float64_to_uint32(u.d, &env->spe_status);
       
  3325 }
       
  3326 
       
  3327 uint32_t helper_efdctsiz (uint64_t val)
       
  3328 {
       
  3329     CPU_DoubleU u;
       
  3330 
       
  3331     u.ll = val;
       
  3332     /* NaNs are not treated the way IEEE 754 requires */
       
  3333     if (unlikely(float64_is_nan(u.d)))
       
  3334         return 0;
       
  3335 
       
  3336     return float64_to_int32_round_to_zero(u.d, &env->spe_status);
       
  3337 }
       
  3338 
       
  3339 uint64_t helper_efdctsidz (uint64_t val)
       
  3340 {
       
  3341     CPU_DoubleU u;
       
  3342 
       
  3343     u.ll = val;
       
  3344     /* NaNs are not treated the way IEEE 754 requires */
       
  3345     if (unlikely(float64_is_nan(u.d)))
       
  3346         return 0;
       
  3347 
       
  3348     return float64_to_int64_round_to_zero(u.d, &env->spe_status);
       
  3349 }
       
  3350 
       
  3351 uint32_t helper_efdctuiz (uint64_t val)
       
  3352 {
       
  3353     CPU_DoubleU u;
       
  3354 
       
  3355     u.ll = val;
       
  3356     /* NaNs are not treated the way IEEE 754 requires */
       
  3357     if (unlikely(float64_is_nan(u.d)))
       
  3358         return 0;
       
  3359 
       
  3360     return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
       
  3361 }
       
  3362 
       
  3363 uint64_t helper_efdctuidz (uint64_t val)
       
  3364 {
       
  3365     CPU_DoubleU u;
       
  3366 
       
  3367     u.ll = val;
       
  3368     /* NaNs are not treated the way IEEE 754 requires */
       
  3369     if (unlikely(float64_is_nan(u.d)))
       
  3370         return 0;
       
  3371 
       
  3372     return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
       
  3373 }
       
  3374 
       
  3375 uint64_t helper_efdcfsf (uint32_t val)
       
  3376 {
       
  3377     CPU_DoubleU u;
       
  3378     float64 tmp;
       
  3379 
       
  3380     u.d = int32_to_float64(val, &env->spe_status);
       
  3381     tmp = int64_to_float64(1ULL << 32, &env->spe_status);
       
  3382     u.d = float64_div(u.d, tmp, &env->spe_status);
       
  3383 
       
  3384     return u.ll;
       
  3385 }
       
  3386 
       
  3387 uint64_t helper_efdcfuf (uint32_t val)
       
  3388 {
       
  3389     CPU_DoubleU u;
       
  3390     float64 tmp;
       
  3391 
       
  3392     u.d = uint32_to_float64(val, &env->spe_status);
       
  3393     tmp = int64_to_float64(1ULL << 32, &env->spe_status);
       
  3394     u.d = float64_div(u.d, tmp, &env->spe_status);
       
  3395 
       
  3396     return u.ll;
       
  3397 }
       
  3398 
       
  3399 uint32_t helper_efdctsf (uint64_t val)
       
  3400 {
       
  3401     CPU_DoubleU u;
       
  3402     float64 tmp;
       
  3403 
       
  3404     u.ll = val;
       
  3405     /* NaNs are not treated the way IEEE 754 requires */
       
  3406     if (unlikely(float64_is_nan(u.d)))
       
  3407         return 0;
       
  3408     tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
       
  3409     u.d = float64_mul(u.d, tmp, &env->spe_status);
       
  3410 
       
  3411     return float64_to_int32(u.d, &env->spe_status);
       
  3412 }
       
  3413 
       
  3414 uint32_t helper_efdctuf (uint64_t val)
       
  3415 {
       
  3416     CPU_DoubleU u;
       
  3417     float64 tmp;
       
  3418 
       
  3419     u.ll = val;
       
  3420     /* NaNs are not treated the way IEEE 754 requires */
       
  3421     if (unlikely(float64_is_nan(u.d)))
       
  3422         return 0;
       
  3423     tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
       
  3424     u.d = float64_mul(u.d, tmp, &env->spe_status);
       
  3425 
       
  3426     return float64_to_uint32(u.d, &env->spe_status);
       
  3427 }
       
  3428 
       
  3429 uint32_t helper_efscfd (uint64_t val)
       
  3430 {
       
  3431     CPU_DoubleU u1;
       
  3432     CPU_FloatU u2;
       
  3433 
       
  3434     u1.ll = val;
       
  3435     u2.f = float64_to_float32(u1.d, &env->spe_status);
       
  3436 
       
  3437     return u2.l;
       
  3438 }
       
  3439 
       
  3440 uint64_t helper_efdcfs (uint32_t val)
       
  3441 {
       
  3442     CPU_DoubleU u2;
       
  3443     CPU_FloatU u1;
       
  3444 
       
  3445     u1.l = val;
       
  3446     u2.d = float32_to_float64(u1.f, &env->spe_status);
       
  3447 
       
  3448     return u2.ll;
       
  3449 }
       
  3450 
       
  3451 /* Double-precision floating-point arithmetic */
       
  3452 uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
       
  3453 {
       
  3454     CPU_DoubleU u1, u2;
       
  3455     u1.ll = op1;
       
  3456     u2.ll = op2;
       
  3457     u1.d = float64_add(u1.d, u2.d, &env->spe_status);
       
  3458     return u1.ll;
       
  3459 }
       
  3460 
       
  3461 uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
       
  3462 {
       
  3463     CPU_DoubleU u1, u2;
       
  3464     u1.ll = op1;
       
  3465     u2.ll = op2;
       
  3466     u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
       
  3467     return u1.ll;
       
  3468 }
       
  3469 
       
  3470 uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
       
  3471 {
       
  3472     CPU_DoubleU u1, u2;
       
  3473     u1.ll = op1;
       
  3474     u2.ll = op2;
       
  3475     u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
       
  3476     return u1.ll;
       
  3477 }
       
  3478 
       
  3479 uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
       
  3480 {
       
  3481     CPU_DoubleU u1, u2;
       
  3482     u1.ll = op1;
       
  3483     u2.ll = op2;
       
  3484     u1.d = float64_div(u1.d, u2.d, &env->spe_status);
       
  3485     return u1.ll;
       
  3486 }
       
  3487 
       
  3488 /* Double-precision floating-point comparisons */
       
  3489 uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
       
  3490 {
       
  3491     CPU_DoubleU u1, u2;
       
  3492     u1.ll = op1;
       
  3493     u2.ll = op2;
       
  3494     return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
       
  3495 }
       
  3496 
       
  3497 uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
       
  3498 {
       
  3499     CPU_DoubleU u1, u2;
       
  3500     u1.ll = op1;
       
  3501     u2.ll = op2;
       
  3502     return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
       
  3503 }
       
  3504 
       
  3505 uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
       
  3506 {
       
  3507     CPU_DoubleU u1, u2;
       
  3508     u1.ll = op1;
       
  3509     u2.ll = op2;
       
  3510     return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
       
  3511 }
       
  3512 
       
  3513 uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
       
  3514 {
       
  3515     /* XXX: TODO: test special values (NaN, infinities, ...) */
       
  3516     return helper_efdtstlt(op1, op2);
       
  3517 }
       
  3518 
       
  3519 uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
       
  3520 {
       
  3521     /* XXX: TODO: test special values (NaN, infinities, ...) */
       
  3522     return helper_efdtstgt(op1, op2);
       
  3523 }
       
  3524 
       
  3525 uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
       
  3526 {
       
  3527     /* XXX: TODO: test special values (NaN, infinities, ...) */
       
  3528     return helper_efdtsteq(op1, op2);
       
  3529 }
       
  3530 
       
  3531 /*****************************************************************************/
       
  3532 /* Softmmu support */
       
  3533 #if !defined (CONFIG_USER_ONLY)
       
  3534 
       
  3535 #define MMUSUFFIX _mmu
       
  3536 
       
  3537 #define SHIFT 0
       
  3538 #include "softmmu_template.h"
       
  3539 
       
  3540 #define SHIFT 1
       
  3541 #include "softmmu_template.h"
       
  3542 
       
  3543 #define SHIFT 2
       
  3544 #include "softmmu_template.h"
       
  3545 
       
  3546 #define SHIFT 3
       
  3547 #include "softmmu_template.h"
       
  3548 
       
  3549 /* Try to fill the TLB and raise an exception on error. If retaddr is
       
  3550    NULL, the function was called from C code (i.e. not from generated
       
  3551    code or from helper.c) */
       
  3552 /* XXX: fix it to restore all registers */
       
  3553 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
       
  3554 {
       
  3555     TranslationBlock *tb;
       
  3556     CPUState *saved_env;
       
  3557     unsigned long pc;
       
  3558     int ret;
       
  3559 
       
  3560     /* XXX: hack to restore env in all cases, even if not called from
       
  3561        generated code */
       
  3562     saved_env = env;
       
  3563     env = cpu_single_env;
       
  3564     ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
       
  3565     if (unlikely(ret != 0)) {
       
  3566         if (likely(retaddr)) {
       
  3567             /* now we have a real cpu fault */
       
  3568             pc = (unsigned long)retaddr;
       
  3569             tb = tb_find_pc(pc);
       
  3570             if (likely(tb)) {
       
  3571                 /* the PC is inside the translated code. It means that we have
       
  3572                    a virtual CPU fault */
       
  3573                 cpu_restore_state(tb, env, pc, NULL);
       
  3574             }
       
  3575         }
       
  3576         helper_raise_exception_err(env->exception_index, env->error_code);
       
  3577     }
       
  3578     env = saved_env;
       
  3579 }
       
  3580 
       
  3581 /* Segment registers load and store */
       
  3582 target_ulong helper_load_sr (target_ulong sr_num)
       
  3583 {
       
  3584     return env->sr[sr_num];
       
  3585 }
       
  3586 
       
  3587 void helper_store_sr (target_ulong sr_num, target_ulong val)
       
  3588 {
       
  3589     ppc_store_sr(env, sr_num, val);
       
  3590 }
       
  3591 
       
  3592 /* SLB management */
       
  3593 #if defined(TARGET_PPC64)
       
  3594 target_ulong helper_load_slb (target_ulong slb_nr)
       
  3595 {
       
  3596     return ppc_load_slb(env, slb_nr);
       
  3597 }
       
  3598 
       
  3599 void helper_store_slb (target_ulong slb_nr, target_ulong rs)
       
  3600 {
       
  3601     ppc_store_slb(env, slb_nr, rs);
       
  3602 }
       
  3603 
       
  3604 void helper_slbia (void)
       
  3605 {
       
  3606     ppc_slb_invalidate_all(env);
       
  3607 }
       
  3608 
       
  3609 void helper_slbie (target_ulong addr)
       
  3610 {
       
  3611     ppc_slb_invalidate_one(env, addr);
       
  3612 }
       
  3613 
       
  3614 #endif /* defined(TARGET_PPC64) */
       
  3615 
       
  3616 /* TLB management */
       
  3617 void helper_tlbia (void)
       
  3618 {
       
  3619     ppc_tlb_invalidate_all(env);
       
  3620 }
       
  3621 
       
  3622 void helper_tlbie (target_ulong addr)
       
  3623 {
       
  3624     ppc_tlb_invalidate_one(env, addr);
       
  3625 }
       
  3626 
       
  3627 /* Software-driven TLB management */
       
  3628 /* PowerPC 602/603 software TLB load instructions helpers */
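       /* The 602/603 reload helpers below fetch the missed EPN, the
        * compare value and the physical entry from the IMISS/ICMP (code)
        * or DMISS/DCMP (data) and RPA SPRs, take the victim way from
        * SRR1, and install the entry with ppc6xx_tlb_store(). */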
       
  3629 static void do_6xx_tlb (target_ulong new_EPN, int is_code)
       
  3630 {
       
  3631     target_ulong RPN, CMP, EPN;
       
  3632     int way;
       
  3633 
       
  3634     RPN = env->spr[SPR_RPA];
       
  3635     if (is_code) {
       
  3636         CMP = env->spr[SPR_ICMP];
       
  3637         EPN = env->spr[SPR_IMISS];
       
  3638     } else {
       
  3639         CMP = env->spr[SPR_DCMP];
       
  3640         EPN = env->spr[SPR_DMISS];
       
  3641     }
       
  3642     way = (env->spr[SPR_SRR1] >> 17) & 1;
       
  3643 #if defined (DEBUG_SOFTWARE_TLB)
       
  3644     if (loglevel != 0) {
       
  3645         fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
       
  3646                 " PTE1 " ADDRX " way %d\n",
       
  3647                 __func__, new_EPN, EPN, CMP, RPN, way);
       
  3648     }
       
  3649 #endif
       
  3650     /* Store this TLB */
       
  3651     ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
       
  3652                      way, is_code, CMP, RPN);
       
  3653 }
       
  3654 
       
  3655 void helper_6xx_tlbd (target_ulong EPN)
       
  3656 {
       
  3657     do_6xx_tlb(EPN, 0);
       
  3658 }
       
  3659 
       
  3660 void helper_6xx_tlbi (target_ulong EPN)
       
  3661 {
       
  3662     do_6xx_tlb(EPN, 1);
       
  3663 }
       
  3664 
       
  3665 /* PowerPC 74xx software TLB load instructions helpers */
       
  3666 static void do_74xx_tlb (target_ulong new_EPN, int is_code)
       
  3667 {
       
  3668     target_ulong RPN, CMP, EPN;
       
  3669     int way;
       
  3670 
       
  3671     RPN = env->spr[SPR_PTELO];
       
  3672     CMP = env->spr[SPR_PTEHI];
       
  3673     EPN = env->spr[SPR_TLBMISS] & ~0x3;
       
  3674     way = env->spr[SPR_TLBMISS] & 0x3;
       
  3675 #if defined (DEBUG_SOFTWARE_TLB)
       
  3676     if (loglevel != 0) {
       
  3677         fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
       
  3678                 " PTE1 " ADDRX " way %d\n",
       
  3679                 __func__, new_EPN, EPN, CMP, RPN, way);
       
  3680     }
       
  3681 #endif
       
  3682     /* Store this TLB */
       
  3683     ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
       
  3684                      way, is_code, CMP, RPN);
       
  3685 }
       
  3686 
       
  3687 void helper_74xx_tlbd (target_ulong EPN)
       
  3688 {
       
  3689     do_74xx_tlb(EPN, 0);
       
  3690 }
       
  3691 
       
  3692 void helper_74xx_tlbi (target_ulong EPN)
       
  3693 {
       
  3694     do_74xx_tlb(EPN, 1);
       
  3695 }
       
  3696 
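       /* BookE TLB SIZE fields encode power-of-four page sizes starting
        * at 1 KiB (1024 << 2*SIZE); booke_page_size_to_tlb() is the
        * inverse and returns -1 for sizes without an encoding. */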
       
  3697 static always_inline target_ulong booke_tlb_to_page_size (int size)
       
  3698 {
       
  3699     return 1024 << (2 * size);
       
  3700 }
       
  3701 
       
  3702 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
       
  3703 {
       
  3704     int size;
       
  3705 
       
  3706     switch (page_size) {
       
  3707     case 0x00000400UL:
       
  3708         size = 0x0;
       
  3709         break;
       
  3710     case 0x00001000UL:
       
  3711         size = 0x1;
       
  3712         break;
       
  3713     case 0x00004000UL:
       
  3714         size = 0x2;
       
  3715         break;
       
  3716     case 0x00010000UL:
       
  3717         size = 0x3;
       
  3718         break;
       
  3719     case 0x00040000UL:
       
  3720         size = 0x4;
       
  3721         break;
       
  3722     case 0x00100000UL:
       
  3723         size = 0x5;
       
  3724         break;
       
  3725     case 0x00400000UL:
       
  3726         size = 0x6;
       
  3727         break;
       
  3728     case 0x01000000UL:
       
  3729         size = 0x7;
       
  3730         break;
       
  3731     case 0x04000000UL:
       
  3732         size = 0x8;
       
  3733         break;
       
  3734     case 0x10000000UL:
       
  3735         size = 0x9;
       
  3736         break;
       
  3737     case 0x40000000UL:
       
  3738         size = 0xA;
       
  3739         break;
       
  3740 #if defined (TARGET_PPC64)
       
  3741     case 0x000100000000ULL:
       
  3742         size = 0xB;
       
  3743         break;
       
  3744     case 0x000400000000ULL:
       
  3745         size = 0xC;
       
  3746         break;
       
  3747     case 0x001000000000ULL:
       
  3748         size = 0xD;
       
  3749         break;
       
  3750     case 0x004000000000ULL:
       
  3751         size = 0xE;
       
  3752         break;
       
  3753     case 0x010000000000ULL:
       
  3754         size = 0xF;
       
  3755         break;
       
  3756 #endif
       
  3757     default:
       
  3758         size = -1;
       
  3759         break;
       
  3760     }
       
  3761 
       
  3762     return size;
       
  3763 }
       
  3764 
       
  3765 /* Helpers for 4xx TLB management */
       
  3766 target_ulong helper_4xx_tlbre_lo (target_ulong entry)
       
  3767 {
       
  3768     ppcemb_tlb_t *tlb;
       
  3769     target_ulong ret;
       
  3770     int size;
       
  3771 
       
  3772     entry &= 0x3F;
       
  3773     tlb = &env->tlb[entry].tlbe;
       
  3774     ret = tlb->EPN;
       
  3775     if (tlb->prot & PAGE_VALID)
       
  3776         ret |= 0x400;
       
  3777     size = booke_page_size_to_tlb(tlb->size);
       
  3778     if (size < 0 || size > 0x7)
       
  3779         size = 1;
       
  3780     ret |= size << 7;
       
  3781     env->spr[SPR_40x_PID] = tlb->PID;
       
  3782     return ret;
       
  3783 }
       
  3784 
       
  3785 target_ulong helper_4xx_tlbre_hi (target_ulong entry)
       
  3786 {
       
  3787     ppcemb_tlb_t *tlb;
       
  3788     target_ulong ret;
       
  3789 
       
  3790     entry &= 0x3F;
       
  3791     tlb = &env->tlb[entry].tlbe;
       
  3792     ret = tlb->RPN;
       
  3793     if (tlb->prot & PAGE_EXEC)
       
  3794         ret |= 0x200;
       
  3795     if (tlb->prot & PAGE_WRITE)
       
  3796         ret |= 0x100;
       
  3797     return ret;
       
  3798 }
       
  3799 
       
  3800 void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
       
  3801 {
       
  3802     ppcemb_tlb_t *tlb;
       
  3803     target_ulong page, end;
       
  3804 
       
  3805 #if defined (DEBUG_SOFTWARE_TLB)
       
  3806     if (loglevel != 0) {
       
  3807         fprintf(logfile, "%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
       
  3808     }
       
  3809 #endif
       
  3810     entry &= 0x3F;
       
  3811     tlb = &env->tlb[entry].tlbe;
       
  3812     /* Invalidate previous TLB (if it's valid) */
       
  3813     if (tlb->prot & PAGE_VALID) {
       
  3814         end = tlb->EPN + tlb->size;
       
  3815 #if defined (DEBUG_SOFTWARE_TLB)
       
  3816         if (loglevel != 0) {
       
  3817             fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
       
  3818                     " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
       
  3819         }
       
  3820 #endif
       
  3821         for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
       
  3822             tlb_flush_page(env, page);
       
  3823     }
       
  3824     tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
       
  3825     /* We cannot handle TLB size < TARGET_PAGE_SIZE.
       
  3826      * If this ever occurs, one should use the ppcemb target instead
       
  3827      * of the ppc or ppc64 one
       
  3828      */
       
  3829     if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
       
  3830         cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
       
  3831                   "are not supported (%d)\n",
       
  3832                   tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
       
  3833     }
       
  3834     tlb->EPN = val & ~(tlb->size - 1);
       
  3835     if (val & 0x40)
       
  3836         tlb->prot |= PAGE_VALID;
       
  3837     else
       
  3838         tlb->prot &= ~PAGE_VALID;
       
  3839     if (val & 0x20) {
       
  3840         /* XXX: TO BE FIXED */
       
  3841         cpu_abort(env, "Little-endian TLB entries are not supported for now\n");
       
  3842     }
       
  3843     tlb->PID = env->spr[SPR_40x_PID]; /* PID */
       
  3844     tlb->attr = val & 0xFF;
       
  3845 #if defined (DEBUG_SOFTWARE_TLB)
       
  3846     if (loglevel != 0) {
       
  3847         fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
       
  3848                 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
       
  3849                 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
       
  3850                 tlb->prot & PAGE_READ ? 'r' : '-',
       
  3851                 tlb->prot & PAGE_WRITE ? 'w' : '-',
       
  3852                 tlb->prot & PAGE_EXEC ? 'x' : '-',
       
  3853                 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
       
  3854     }
       
  3855 #endif
       
  3856     /* Invalidate new TLB (if valid) */
       
  3857     if (tlb->prot & PAGE_VALID) {
       
  3858         end = tlb->EPN + tlb->size;
       
  3859 #if defined (DEBUG_SOFTWARE_TLB)
       
  3860         if (loglevel != 0) {
       
  3861             fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
       
  3862                     " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
       
  3863         }
       
  3864 #endif
       
  3865         for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
       
  3866             tlb_flush_page(env, page);
       
  3867     }
       
  3868 }
       
  3869 
       
  3870 void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
       
  3871 {
       
  3872     ppcemb_tlb_t *tlb;
       
  3873 
       
  3874 #if defined (DEBUG_SOFTWARE_TLB)
       
  3875     if (loglevel != 0) {
       
  3876         fprintf(logfile, "%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
       
  3877     }
       
  3878 #endif
       
  3879     entry &= 0x3F;
       
  3880     tlb = &env->tlb[entry].tlbe;
       
  3881     tlb->RPN = val & 0xFFFFFC00;
       
  3882     tlb->prot = PAGE_READ;
       
  3883     if (val & 0x200)
       
  3884         tlb->prot |= PAGE_EXEC;
       
  3885     if (val & 0x100)
       
  3886         tlb->prot |= PAGE_WRITE;
       
  3887 #if defined (DEBUG_SOFTWARE_TLB)
       
  3888     if (loglevel != 0) {
       
  3889         fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
       
  3890                 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
       
  3891                 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
       
  3892                 tlb->prot & PAGE_READ ? 'r' : '-',
       
  3893                 tlb->prot & PAGE_WRITE ? 'w' : '-',
       
  3894                 tlb->prot & PAGE_EXEC ? 'x' : '-',
       
  3895                 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
       
  3896     }
       
  3897 #endif
       
  3898 }
       
  3899 
       
  3900 target_ulong helper_4xx_tlbsx (target_ulong address)
       
  3901 {
       
  3902     return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
       
  3903 }
       
  3904 
       
  3905 /* PowerPC 440 TLB management */
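       /* helper_440_tlbwe writes one third of a 440 TLB entry, selected
        * by 'word': word 0 carries EPN, page size and the valid bit,
        * word 1 the RPN, word 2 the storage attributes and access
        * permissions; any change that may affect existing translations
        * triggers a TLB flush. */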
       
  3906 void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
       
  3907 {
       
  3908     ppcemb_tlb_t *tlb;
       
  3909     target_ulong EPN, RPN, size;
       
  3910     int do_flush_tlbs;
       
  3911 
       
  3912 #if defined (DEBUG_SOFTWARE_TLB)
       
  3913     if (loglevel != 0) {
       
  3914         fprintf(logfile, "%s word %d entry %d value " ADDRX "\n",
       
  3915                 __func__, word, (int)entry, value);
       
  3916     }
       
  3917 #endif
       
  3918     do_flush_tlbs = 0;
       
  3919     entry &= 0x3F;
       
  3920     tlb = &env->tlb[entry].tlbe;
       
  3921     switch (word) {
       
  3922     default:
       
  3923         /* Just here to please gcc */
       
  3924     case 0:
       
  3925         EPN = value & 0xFFFFFC00;
       
  3926         if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
       
  3927             do_flush_tlbs = 1;
       
  3928         tlb->EPN = EPN;
       
  3929         size = booke_tlb_to_page_size((value >> 4) & 0xF);
       
  3930         if ((tlb->prot & PAGE_VALID) && tlb->size < size)
       
  3931             do_flush_tlbs = 1;
       
  3932         tlb->size = size;
       
  3933         tlb->attr &= ~0x1;
       
  3934         tlb->attr |= (value >> 8) & 1;
       
  3935         if (value & 0x200) {
       
  3936             tlb->prot |= PAGE_VALID;
       
  3937         } else {
       
  3938             if (tlb->prot & PAGE_VALID) {
       
  3939                 tlb->prot &= ~PAGE_VALID;
       
  3940                 do_flush_tlbs = 1;
       
  3941             }
       
  3942         }
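               /* The entry's PID comes from the low byte of MMUCR rather
                * than from the instruction operands. */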
       
  3943         tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
       
  3944         if (do_flush_tlbs)
       
  3945             tlb_flush(env, 1);
       
  3946         break;
       
  3947     case 1:
       
  3948         RPN = value & 0xFFFFFC0F;
       
  3949         if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
       
  3950             tlb_flush(env, 1);
       
  3951         tlb->RPN = RPN;
       
  3952         break;
       
  3953     case 2:
       
  3954         tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
       
  3955         tlb->prot = tlb->prot & PAGE_VALID;
       
  3956         if (value & 0x1)
       
  3957             tlb->prot |= PAGE_READ << 4;
       
  3958         if (value & 0x2)
       
  3959             tlb->prot |= PAGE_WRITE << 4;
       
  3960         if (value & 0x4)
       
  3961             tlb->prot |= PAGE_EXEC << 4;
       
  3962         if (value & 0x8)
       
  3963             tlb->prot |= PAGE_READ;
       
  3964         if (value & 0x10)
       
  3965             tlb->prot |= PAGE_WRITE;
       
  3966         if (value & 0x20)
       
  3967             tlb->prot |= PAGE_EXEC;
       
  3968         break;
       
  3969     }
       
  3970 }
       
  3971 
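       /* tlbre: read back one word of a 440 TLB entry, rebuilding the
        * register image that tlbwe above accepts.  Reading word 0 also
        * copies the entry's PID into the low byte of MMUCR. */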
       
  3972 target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
       
  3973 {
       
  3974     ppcemb_tlb_t *tlb;
       
  3975     target_ulong ret;
       
  3976     int size;
       
  3977 
       
  3978     entry &= 0x3F;
       
  3979     tlb = &env->tlb[entry].tlbe;
       
  3980     switch (word) {
       
  3981     default:
       
  3982         /* Unknown 'word' values fall through to word 0 so 'ret' is always set */
       
  3983     case 0:
       
  3984         ret = tlb->EPN;
       
  3985         size = booke_page_size_to_tlb(tlb->size);
       
  3986         if (size < 0 || size > 0xF)
       
  3987             size = 1;
       
  3988         ret |= size << 4;
       
  3989         if (tlb->attr & 0x1)
       
  3990             ret |= 0x100;
       
  3991         if (tlb->prot & PAGE_VALID)
       
  3992             ret |= 0x200;
       
  3993         env->spr[SPR_440_MMUCR] &= ~0x000000FF;
       
  3994         env->spr[SPR_440_MMUCR] |= tlb->PID;
       
  3995         break;
       
  3996     case 1:
       
  3997         ret = tlb->RPN;
       
  3998         break;
       
  3999     case 2:
       
  4000         ret = tlb->attr & ~0x1;
       
  4001         if (tlb->prot & (PAGE_READ << 4))
       
  4002             ret |= 0x1;
       
  4003         if (tlb->prot & (PAGE_WRITE << 4))
       
  4004             ret |= 0x2;
       
  4005         if (tlb->prot & (PAGE_EXEC << 4))
       
  4006             ret |= 0x4;
       
  4007         if (tlb->prot & PAGE_READ)
       
  4008             ret |= 0x8;
       
  4009         if (tlb->prot & PAGE_WRITE)
       
  4010             ret |= 0x10;
       
  4011         if (tlb->prot & PAGE_EXEC)
       
  4012             ret |= 0x20;
       
  4013         break;
       
  4014     }
       
  4015     return ret;
       
  4016 }
       
  4017 
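       /* tlbsx: search the TLB using the PID from the low byte of MMUCR;
        * returns the matching entry index, or -1 when nothing matches. */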
       
  4018 target_ulong helper_440_tlbsx (target_ulong address)
       
  4019 {
       
  4020     return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
       
  4021 }
       
  4022 
       
  4023 #endif /* !CONFIG_USER_ONLY */