symbian-qemu-0.9.1-12/qemu-symbian-svp/cpu-all.h
changeset 1:2fb8b9db1c86
       
/*
 * defines common to all virtual CPUs
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
#define WORDS_ALIGNED
#endif

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"
#include "softfloat.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
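
/* Editor's note: an illustrative sketch (hypothetical helper, not part of
   the original header).  tswap32() converts between host and target byte
   order: it byte-swaps only when the two differ (BSWAP_NEEDED) and is a
   no-op otherwise, so applying it twice always yields the input again. */
static inline uint32_t tswap32_round_trip_example(uint32_t host_val)
{
    uint32_t target_val = tswap32(host_val);  /* host -> target order */
    return tswap32(target_val);               /* target -> host, == host_val */
}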
       
typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian! */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
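
/* Editor's note: an illustrative sketch (hypothetical helper, not part of
   the original header).  CPU_DoubleU exposes the two 32 bit halves of a
   64 bit float; on plain big or little endian hosts l.upper is the most
   significant word of ll, so the halves reassemble portably (the arm FPA
   layout warned about above is the exception). */
static inline uint64_t cpu_double_from_words_example(uint32_t hi, uint32_t lo)
{
    CPU_DoubleU u;
    u.l.upper = hi;   /* most significant 32 bits */
    u.l.lower = lo;   /* least significant 32 bits */
    return u.ll;      /* == ((uint64_t)hi << 32) | lo */
}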
       
#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system-wide setting: bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
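
/* Editor's note: an illustrative sketch (hypothetical helper, not part of
   the original header) decoding the ld/st naming scheme documented above:
   st + l (32 bit) + le stores through a raw host pointer, and
   ld + u (unsigned) + w (16 bit) + be reads the same bytes back in
   big-endian order. */
static inline int cpu_all_accessor_naming_example(void)
{
    uint8_t buf[4];
    stl_le_p(buf, 0x11223344);   /* buf now holds 44 33 22 11 */
    return lduw_be_p(buf);       /* reads 44 33 big endian -> 0x4433 */
}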
       
/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(__ret == (abi_ulong)__ret); \
    (abi_ulong)__ret; \
})
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest == (abi_ulong)__guest); \
})

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)
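
/* Editor's note: an illustrative sketch (hypothetical helper, not part of
   the original header).  Round-trips a guest address through the host
   mapping; h2g() asserts that the host pointer maps back into the target
   address space. */
static inline abi_ulong g2h_round_trip_example(abi_ulong guest_addr)
{
    void *host = g2h(guest_addr);   /* guest -> host pointer   */
    return h2g(host);               /* host  -> guest, checked */
}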
       
#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)

#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */
       
/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* ??? These should be the larger of unsigned long and target_ulong.  */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
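
/* Editor's note: a worked example (hypothetical helper, not part of the
   original header).  With 4 KB pages (TARGET_PAGE_BITS == 12),
   addr & TARGET_PAGE_MASK rounds down (0x1234 -> 0x1000) while
   TARGET_PAGE_ALIGN rounds up (0x1234 -> 0x2000). */
static inline target_ulong target_page_base_example(target_ulong addr)
{
    return addr & TARGET_PAGE_MASK;   /* start of the page containing addr */
}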
       
/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#define PAGE_RESERVED  0x0020

void page_dump(FILE *f);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);

void cpu_exec_init_all(unsigned long tb_size);
CPUState *cpu_copy(CPUState *env);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f,
                         int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                         int flags);

void cpu_abort(CPUState *env, const char *fmt, ...)
    __attribute__ ((__format__ (__printf__, 2, 3)))
    __attribute__ ((__noreturn__));
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
extern int64_t qemu_icount;
extern int use_icount;

#define CPU_INTERRUPT_EXIT   0x01 /* wants exit from main loop */
#define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08 /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10 /* Fast interrupt pending.  */
#define CPU_INTERRUPT_HALT   0x20 /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40 /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80 /* Debug event occurred.  */
#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending.  */
#define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */

void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

/* IO ports API */

/* NOTE: as these functions may even be used when there is an ISA
   bridge on non-x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif

/* address in the RAM (different from a physical address) */
#ifdef USE_KQEMU
typedef uint32_t ram_addr_t;
#else
typedef unsigned long ram_addr_t;
#endif

/* memory API */

extern ram_addr_t phys_ram_size;
extern int phys_ram_fd;
#ifdef USE_KQEMU
extern uint8_t *kqemu_phys_ram_base;
#endif
extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;

/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags.  The ROMD code stores the page ram offset in the iotlb entry,
   so only a limited number of ids are available.  */

#define IO_MEM_SHIFT       3
#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

#define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY    (3 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written.  */
#define IO_MEM_ROMD        (1)
#define IO_MEM_SUBPAGE     (2)
#define IO_MEM_SUBWIDTH    (4)
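
/* Editor's note: an illustrative sketch (hypothetical helper, not part of
   the original header).  A ROMD mapping is assumed to combine a page
   aligned RAM offset, the handle returned by cpu_register_io_memory()
   (an io index already shifted left by IO_MEM_SHIFT), and the
   IO_MEM_ROMD flag in the low bits. */
static inline ram_addr_t io_mem_romd_example(ram_addr_t ram_offset, int io_handle)
{
    return ram_offset | (ram_addr_t)io_handle | IO_MEM_ROMD;
}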
       
/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros.  */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK   (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY    (1 << 4)
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO        (1 << 5)

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset);
static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                                ram_addr_t size,
                                                ram_addr_t phys_offset)
{
    cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
}

/* FIXME: These should not exist.  cpu_physical_memory_rw or a DMA API
   should be used instead.  */
uint8_t *host_ram_addr(ram_addr_t offset);
ram_addr_t ram_offset_from_host(uint8_t *addr);
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
ram_addr_t get_ram_offset_phys(target_phys_addr_t addr);
ram_addr_t qemu_ram_alloc(ram_addr_t);
void qemu_ram_free(ram_addr_t addr);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
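
/* Editor's note: an illustrative sketch (hypothetical helper, not part of
   the original header).  Writes a 32 bit value to guest physical memory in
   target byte order by staging it through a host buffer; the stl_phys()
   declared below provides the same service natively. */
static inline void stl_phys_staged_example(target_phys_addr_t addr, uint32_t val)
{
    uint8_t buf[4];
    stl_p(buf, val);                          /* target-endian store    */
    cpu_physical_memory_write(addr, buf, 4);  /* copy into guest memory */
}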
       
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define KQEMU_DIRTY_FLAG     0x04
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
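
/* Editor's note: an illustrative sketch (hypothetical helper, not part of
   the original header).  Each RAM page has one dirty byte in
   phys_ram_dirty; individual bits such as VGA_DIRTY_FLAG let different
   consumers (display, code cache, migration) track writes independently. */
static inline int page_needs_vga_update_example(ram_addr_t addr)
{
    return cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG) != 0;
}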
       
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(__powerpc__)

static inline uint32_t get_tbl(void)
{
    uint32_t tbl;
    asm volatile("mftb %0" : "=r" (tbl));
    return tbl;
}

static inline uint32_t get_tbu(void)
{
    uint32_t tbu;
    asm volatile("mftbu %0" : "=r" (tbu));
    return tbu;
}

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t l, h, h1;
    /* NOTE: we test if wrapping has occurred */
    do {
        h = get_tbu();
        l = get_tbl();
        h1 = get_tbu();
    } while (h != h1);
    return ((int64_t)h << 32) | l;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks(void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}
       
#elif defined(__mips__)

static inline int64_t cpu_get_real_ticks(void)
{
#if __mips_isa_rev >= 2
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count)
        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));

    __asm__ __volatile__("rdhwr %0, $2" : "=r" (count));
    return (int64_t)(count * cyc_per_count);
#else
    /* FIXME */
    static int64_t ticks = 0;
    return ticks++;
#endif
}
       
#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks(void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;
#endif

#endif /* CPU_ALL_H */