symbian-qemu-0.9.1-12/qemu-symbian-svp/target-i386/kvm.c
changeset 1 2fb8b9db1c86
/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
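
/* Build the guest CPUID table and hand it to the kernel, which services
 * CPUID exits from it directly.  Both the standard (0x0..limit) and
 * extended (0x80000000..limit) leaves are copied verbatim from QEMU's
 * cpu_x86_cpuid() model. */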
       
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid cpuid;
        struct kvm_cpuid_entry entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, cpuid_i;
    uint32_t eax, ebx, ecx, edx;

    cpuid_i = 0;

    cpu_x86_cpuid(env, 0, &eax, &ebx, &ecx, &edx);
    limit = eax;

    for (i = 0; i <= limit; i++) {
        struct kvm_cpuid_entry *c = &cpuid_data.entries[cpuid_i++];

        cpu_x86_cpuid(env, i, &eax, &ebx, &ecx, &edx);
        c->function = i;
        c->eax = eax;
        c->ebx = ebx;
        c->ecx = ecx;
        c->edx = edx;
    }

    cpu_x86_cpuid(env, 0x80000000, &eax, &ebx, &ecx, &edx);
    limit = eax;

    for (i = 0x80000000; i <= limit; i++) {
        struct kvm_cpuid_entry *c = &cpuid_data.entries[cpuid_i++];

        cpu_x86_cpuid(env, i, &eax, &ebx, &ecx, &edx);
        c->function = i;
        c->eax = eax;
        c->ebx = ebx;
        c->ecx = ecx;
        c->edx = edx;
    }

    cpuid_data.cpuid.nent = cpuid_i;

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID, &cpuid_data);
}
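
/* Probe, once, whether the kernel lists MSR_STAR among the MSRs it can
 * save and restore.  The result is cached in a static (-1 = no, 1 = yes),
 * so only the first call pays for the KVM_GET_MSR_INDEX_LIST ioctls.
 * Returns 1 if MSR_STAR is supported, 0 otherwise or on any failure. */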
       
static int kvm_has_msr_star(CPUState *env)
{
    static int has_msr_star;
    int ret;

    /* first time */
    if (has_msr_star == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        has_msr_star = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore.  The probe with nmsrs == 0 is expected to fail
         * with E2BIG once the kernel has filled in the required count. */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG)
            return 0;

        kvm_msr_list = qemu_mallocz(sizeof(msr_list) +
                                    msr_list.nmsrs * sizeof(msr_list.indices[0]));
        if (kvm_msr_list == NULL)
            return 0;

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    break;
                }
            }
        }

        qemu_free(kvm_msr_list);
    }

    if (has_msr_star == 1)
        return 1;
    return 0;
}

int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    /* create vm86 tss.  KVM uses vm86 mode to emulate 16-bit code
     * directly.  In order to use vm86 mode, a TSS is needed.  Since this
     * must be part of guest physical memory, we need to allocate it.  Older
     * versions of KVM just assumed that it would be at the end of physical
     * memory but that doesn't work with more than 4GB of memory.  We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        return ret;
    }

    /* this address is 3 pages before the bios, and the bios should present
     * it as unavailable memory.  FIXME, need to ensure the e820 map deals
     * with this?
     */
    return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
}
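
/* Translate between QEMU's SegmentCache and KVM's struct kvm_segment.
 * set_v8086_seg() synthesizes the fixed attributes vm86 mode requires
 * (type 3, DPL 3, present) instead of trusting the cached flags; set_seg()
 * and get_seg() unpack and repack the descriptor flag bits one by one. */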
       
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    /* take the DPL from the descriptor flags, not the selector RPL */
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}
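
/* Copy a single register in the direction selected by 'set': into the
 * kvm_regs field when uploading state to the kernel, out of it when
 * downloading.  This lets one routine serve both KVM_GET_REGS and
 * KVM_SET_REGS. */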
       
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set)
        *kvm_reg = *qemu_reg;
    else
        *qemu_reg = *kvm_reg;
}

static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}
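
/* Upload the FPU/SSE state.  Two encodings differ between QEMU and KVM:
 * the x87 stack top lives in env->fpstt but belongs in bits 11-13 of fsw,
 * and KVM's ftwx tag byte uses 1 = valid while QEMU's fptags use 1 = empty,
 * hence the inversion. */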
       
static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
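
/* Upload the "special" registers: segments, GDT/IDT, control registers,
 * APIC base/TPR and EFER.  In vm86 mode the segments get the synthetic
 * vm86 attributes; in protected mode the SS RPL and DPL are forced to the
 * CS CPL so the state handed to KVM is self-consistent. */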
       
static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memcpy(sregs.interrupt_bitmap,
           env->interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    if (env->eflags & VM_MASK) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env);
    sregs.apic_base = cpu_get_apic_base(env);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}
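
/* Upload the MSRs that KVM leaves to userspace.  MSR_STAR is written only
 * when the kernel lists it as a save/restore MSR, since not every host CPU
 * and kernel supports it. */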
       
static int kvm_put_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
#ifdef TARGET_X86_64
    /* FIXME if lm capable */
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif
    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}
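
/* Download the special registers and rebuild the state QEMU derives from
 * them: hflags (CPL, CPU mode and segment shortcuts) are recomputed from
 * the fresh segment and control registers, and eflags is split back into
 * QEMU's cc_src/cc_op/df representation. */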
       
static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    memcpy(env->interrupt_bitmap,
           sregs.interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);

#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
    env->cc_src = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    env->cc_op = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);

    return 0;
}
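
/* Read back the same MSR set that kvm_put_msrs() writes.  KVM_GET_MSRS
 * returns the number of MSRs actually read, so the loop below only trusts
 * that many entries. */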
       
static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    /* FIXME lm_capable_kernel */
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;
#endif
    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        }
    }

    return 0;
}
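
/* Push all of QEMU's x86 state into the kernel (and, in the mirror-image
 * getter below, pull it back out): general-purpose registers first, then
 * FPU state, special registers and MSRs.  The first failing ioctl aborts
 * the sequence and its error code is returned. */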
       
int kvm_arch_put_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    return 0;
}
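
/* Hooks run around every KVM_RUN.  Before entry: inject a pending external
 * interrupt if the guest can accept one now, otherwise request an
 * interrupt-window exit so userspace regains control as soon as injection
 * becomes possible; the current TPR is also copied into the run structure.
 * After exit: read back IF, the TPR and the APIC base the kernel reports. */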
       
int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;
            intr.irq = irq;
            /* FIXME: errors */
            dprintf("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if (env->interrupt_request & CPU_INTERRUPT_HARD)
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;

    dprintf("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env);

    return 0;
}

int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag)
        env->eflags |= IF_MASK;
    else
        env->eflags &= ~IF_MASK;

    cpu_set_apic_tpr(env, run->cr8);
    cpu_set_apic_base(env, run->apic_base);

    return 0;
}
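
/* Exit handling.  For KVM_EXIT_HLT: if no unmasked interrupt or NMI is
 * pending, report EXCP_HLT and mark the vcpu halted (return 0); returning
 * 1 instead makes the caller re-enter the guest immediately. */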
       
static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        dprintf("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}