
KVM: Add support for in-kernel PIC emulation
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / drivers/kvm/svm.c
index 9c15f32..2237a59 100644
  *
  */
 
+#include "kvm_svm.h"
+#include "x86_emulate.h"
+#include "irq.h"
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/profile.h>
-#include <asm/desc.h>
+#include <linux/sched.h>
 
-#include "kvm_svm.h"
-#include "x86_emulate.h"
+#include <asm/desc.h>
 
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
@@ -36,7 +39,6 @@ MODULE_LICENSE("GPL");
 
 #define DR7_GD_MASK (1 << 13)
 #define DR6_BD_MASK (1 << 13)
-#define CR4_DE_MASK (1UL << 3)
 
 #define SEG_TYPE_LDT 2
 #define SEG_TYPE_BUSY_TSS16 3
@@ -48,6 +50,11 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_DEATURE_SVML (1 << 2)
 
+static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
+{
+       return container_of(vcpu, struct vcpu_svm, vcpu);
+}
+
 unsigned long iopm_base;
 unsigned long msrpm_base;
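For reference, the new to_svm() helper is the usual container_of round trip: struct kvm_vcpu is embedded inside struct vcpu_svm (the real layout lives in kvm_svm.h), so the generic vcpu pointer can be converted back to the SVM wrapper without a separate ->svm pointer. A minimal userspace sketch of that pattern, with assumed field names beyond vcpu:

        /* Illustration only: a stripped-down container_of round trip,
         * mirroring how to_svm() recovers the SVM wrapper from the
         * generic vcpu pointer.  Fields other than 'vcpu' are assumed;
         * the real struct vcpu_svm is defined in kvm_svm.h. */
        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct kvm_vcpu { int cpu; };

        struct vcpu_svm {
                struct kvm_vcpu vcpu;   /* generic state, embedded by value */
                unsigned long vmcb_pa;  /* SVM-specific state follows */
        };

        static struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
        {
                return container_of(vcpu, struct vcpu_svm, vcpu);
        }

        int main(void)
        {
                struct vcpu_svm svm = { .vmcb_pa = 0x1000 };

                /* Generic code only sees &svm.vcpu; SVM code converts back. */
                printf("vmcb_pa = %#lx\n", to_svm(&svm.vcpu)->vmcb_pa);
                return 0;
        }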
 
@@ -92,20 +99,6 @@ static inline u32 svm_has(u32 feat)
        return svm_features & feat;
 }
 
-static unsigned get_addr_size(struct kvm_vcpu *vcpu)
-{
-       struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
-       u16 cs_attrib;
-
-       if (!(sa->cr0 & CR0_PE_MASK) || (sa->rflags & X86_EFLAGS_VM))
-               return 2;
-
-       cs_attrib = sa->cs.attrib;
-
-       return (cs_attrib & SVM_SELECTOR_L_MASK) ? 8 :
-                               (cs_attrib & SVM_SELECTOR_DB_MASK) ? 4 : 2;
-}
-
 static inline u8 pop_irq(struct kvm_vcpu *vcpu)
 {
        int word_index = __ffs(vcpu->irq_summary);
@@ -180,7 +173,7 @@ static inline void write_dr7(unsigned long val)
 
 static inline void force_new_asid(struct kvm_vcpu *vcpu)
 {
-       vcpu->svm->asid_generation--;
+       to_svm(vcpu)->asid_generation--;
 }
 
 static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
@@ -193,22 +186,24 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
        if (!(efer & KVM_EFER_LMA))
                efer &= ~KVM_EFER_LME;
 
-       vcpu->svm->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
+       to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
        vcpu->shadow_efer = efer;
 }
 
 static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
 {
-       vcpu->svm->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       svm->vmcb->control.event_inj =          SVM_EVTINJ_VALID |
                                                SVM_EVTINJ_VALID_ERR |
                                                SVM_EVTINJ_TYPE_EXEPT |
                                                GP_VECTOR;
-       vcpu->svm->vmcb->control.event_inj_err = error_code;
+       svm->vmcb->control.event_inj_err = error_code;
 }
 
 static void inject_ud(struct kvm_vcpu *vcpu)
 {
-       vcpu->svm->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
+       to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
                                                SVM_EVTINJ_TYPE_EXEPT |
                                                UD_VECTOR;
 }
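svm_inject_gp() and inject_ud() build the vmcb EVENTINJ word directly: vector in the low byte, event type in bits 10:8, an error-code-valid bit, and a valid bit at the top, with the error code itself going into event_inj_err. A standalone sketch of that packing; the bit positions follow the AMD APM and should be checked against svm.h rather than taken from here:

        /* Sketch of the EVENTINJ encoding used above (values stated for
         * illustration; verify against svm.h / the AMD APM). */
        #include <stdint.h>
        #include <stdio.h>

        #define EVTINJ_VECTOR(v)   ((uint32_t)(v) & 0xff)  /* bits 7:0  */
        #define EVTINJ_TYPE_EXEPT  (3u << 8)               /* bits 10:8 */
        #define EVTINJ_VALID_ERR   (1u << 11)              /* push error code */
        #define EVTINJ_VALID       (1u << 31)              /* injection armed */

        int main(void)
        {
                /* #GP (vector 13) with an error code, as svm_inject_gp() builds it. */
                uint32_t gp = EVTINJ_VALID | EVTINJ_VALID_ERR |
                              EVTINJ_TYPE_EXEPT | EVTINJ_VECTOR(13);

                printf("event_inj = %#x\n", gp);
                return 0;
        }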
@@ -227,19 +222,21 @@ static int is_external_interrupt(u32 info)
 
 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->svm->next_rip) {
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       if (!svm->next_rip) {
                printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
                return;
        }
-       if (vcpu->svm->next_rip - vcpu->svm->vmcb->save.rip > 15) {
+       if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE) {
                printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
                       __FUNCTION__,
-                      vcpu->svm->vmcb->save.rip,
-                      vcpu->svm->next_rip);
+                      svm->vmcb->save.rip,
+                      svm->next_rip);
        }
 
-       vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
-       vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
+       vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
+       svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
 
        vcpu->interrupt_window_open = 1;
 }
@@ -349,8 +346,8 @@ err_1:
 
 }
 
-static int set_msr_interception(u32 *msrpm, unsigned msr,
-                               int read, int write)
+static void set_msr_interception(u32 *msrpm, unsigned msr,
+                                int read, int write)
 {
        int i;
 
@@ -365,11 +362,10 @@ static int set_msr_interception(u32 *msrpm, unsigned msr,
                        u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
                        *base = (*base & ~(0x3 << msr_shift)) |
                                (mask << msr_shift);
-                       return 1;
+                       return;
                }
        }
-       printk(KERN_DEBUG "%s: not found 0x%x\n", __FUNCTION__, msr);
-       return 0;
+       BUG();
 }
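set_msr_interception() now BUG()s on an MSR outside the mapped ranges instead of returning a status nobody checked. The MSR permission map it edits uses two bits per MSR, one for read and one for write, where a set bit means intercept. A small sketch of that mask arithmetic, with an assumed offset in place of the real msrpm_ranges[] lookup:

        /* Two permission bits per MSR: a set bit means "intercept".  Only
         * the mask arithmetic is shown; the real code first walks the
         * msrpm_ranges[] table to find the MSR's offset. */
        #include <stdint.h>
        #include <stdio.h>

        static uint32_t msr_perm_bits(int allow_read, int allow_write)
        {
                return (allow_write ? 0 : 2) | (allow_read ? 0 : 1);
        }

        int main(void)
        {
                unsigned msr_offset = 5;                  /* assumed offset within its range */
                unsigned shift = 2 * (msr_offset & 0x0f); /* 16 MSR pairs per u32 */
                uint32_t word = 0xffffffff;               /* default: intercept everything */

                /* Allow direct reads and writes for this MSR. */
                word = (word & ~(0x3u << shift)) | (msr_perm_bits(1, 1) << shift);
                printf("msrpm word = %#x\n", word);
                return 0;
        }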
 
 static __init int svm_hardware_setup(void)
@@ -377,7 +373,7 @@ static __init int svm_hardware_setup(void)
        int cpu;
        struct page *iopm_pages;
        struct page *msrpm_pages;
-       void *msrpm_va;
+       void *iopm_va, *msrpm_va;
        int r;
 
        kvm_emulator_want_group7_invlpg();
@@ -386,8 +382,10 @@ static __init int svm_hardware_setup(void)
 
        if (!iopm_pages)
                return -ENOMEM;
-       memset(page_address(iopm_pages), 0xff,
-                                       PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
+
+       iopm_va = page_address(iopm_pages);
+       memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
+       clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
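The IOPM is one intercept bit per I/O port, so filling the pages with 0xff traps every port, and the new clear_bit(0x80, iopm_va) then lets the guest write the PC debug/POST port 0x80 without a VMEXIT. A bit-per-port illustration (plain userspace C, not the kernel's set_bit/clear_bit helpers):

        /* One intercept bit per I/O port; clearing a bit passes the port
         * straight through to hardware.  Userspace illustration only. */
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        static void allow_port(uint8_t *iopm, unsigned port)
        {
                iopm[port / 8] &= ~(1u << (port % 8));   /* clear = no intercept */
        }

        int main(void)
        {
                static uint8_t iopm[8192];               /* 64K ports, 1 bit each */

                memset(iopm, 0xff, sizeof(iopm));        /* intercept all ports */
                allow_port(iopm, 0x80);                  /* pass through debug port */
                printf("byte 0x10 = %#x\n", iopm[0x80 / 8]);
                return 0;
        }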
 
 
@@ -454,11 +452,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
        seg->base = 0;
 }
 
-static int svm_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
 static void init_vmcb(struct vmcb *vmcb)
 {
        struct vmcb_control_area *control = &vmcb->control;
@@ -559,59 +552,70 @@ static void init_vmcb(struct vmcb *vmcb)
         * cr0 val on cpu init should be 0x60000010, we enable cpu
         * cache by default. the orderly way is to enable cache in bios.
         */
-       save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK;
-       save->cr4 = CR4_PAE_MASK;
+       save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
+       save->cr4 = X86_CR4_PAE;
        /* rdx = ?? */
 }
 
-static int svm_create_vcpu(struct kvm_vcpu *vcpu)
+static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 {
+       struct vcpu_svm *svm;
        struct page *page;
-       int r;
+       int err;
+
+       svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+       if (!svm) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = kvm_vcpu_init(&svm->vcpu, kvm, id);
+       if (err)
+               goto free_svm;
 
-       r = -ENOMEM;
-       vcpu->svm = kzalloc(sizeof *vcpu->svm, GFP_KERNEL);
-       if (!vcpu->svm)
-               goto out1;
        page = alloc_page(GFP_KERNEL);
-       if (!page)
-               goto out2;
-
-       vcpu->svm->vmcb = page_address(page);
-       memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
-       vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
-       vcpu->svm->asid_generation = 0;
-       memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
-       init_vmcb(vcpu->svm->vmcb);
-
-       fx_init(vcpu);
-       vcpu->fpu_active = 1;
-       vcpu->apic_base = 0xfee00000 |
-                       /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
-                       MSR_IA32_APICBASE_ENABLE;
+       if (!page) {
+               err = -ENOMEM;
+               goto uninit;
+       }
 
-       return 0;
+       svm->vmcb = page_address(page);
+       clear_page(svm->vmcb);
+       svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
+       svm->asid_generation = 0;
+       memset(svm->db_regs, 0, sizeof(svm->db_regs));
+       init_vmcb(svm->vmcb);
 
-out2:
-       kfree(vcpu->svm);
-out1:
-       return r;
+       fx_init(&svm->vcpu);
+       svm->vcpu.fpu_active = 1;
+       svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+       if (svm->vcpu.vcpu_id == 0)
+               svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
+
+       return &svm->vcpu;
+
+uninit:
+       kvm_vcpu_uninit(&svm->vcpu);
+free_svm:
+       kmem_cache_free(kvm_vcpu_cache, svm);
+out:
+       return ERR_PTR(err);
 }
 
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->svm)
-               return;
-       if (vcpu->svm->vmcb)
-               __free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT));
-       kfree(vcpu->svm);
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
+       kvm_vcpu_uninit(vcpu);
+       kmem_cache_free(kvm_vcpu_cache, svm);
 }
 
-static void svm_vcpu_load(struct kvm_vcpu *vcpu)
+static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-       int cpu, i;
+       struct vcpu_svm *svm = to_svm(vcpu);
+       int i;
 
-       cpu = get_cpu();
        if (unlikely(cpu != vcpu->cpu)) {
                u64 tsc_this, delta;
 
@@ -621,23 +625,23 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu)
                 */
                rdtscll(tsc_this);
                delta = vcpu->host_tsc - tsc_this;
-               vcpu->svm->vmcb->control.tsc_offset += delta;
+               svm->vmcb->control.tsc_offset += delta;
                vcpu->cpu = cpu;
        }
 
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-               rdmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
+               rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       struct vcpu_svm *svm = to_svm(vcpu);
        int i;
 
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-               wrmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
+               wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
        rdtscll(vcpu->host_tsc);
-       put_cpu();
 }
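svm_vcpu_load() no longer calls get_cpu()/put_cpu() itself; the target cpu arrives as a parameter, and when the vcpu migrates the vmcb tsc_offset absorbs the difference between the old CPU's TSC (saved in svm_vcpu_put()) and the new one's, so the guest-visible TSC (host TSC + offset) does not jump across the move. A worked example with made-up numbers:

        /* guest_tsc = host_tsc + tsc_offset, so when host_tsc changes across
         * a CPU migration the offset must absorb the difference.  The values
         * below are invented for illustration. */
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t old_host_tsc = 1000000;   /* rdtsc on the old CPU at put */
                uint64_t new_host_tsc =  400000;   /* rdtsc on the new CPU at load */
                uint64_t tsc_offset   =   50000;   /* current vmcb tsc_offset */

                uint64_t guest_before = old_host_tsc + tsc_offset;

                /* Same adjustment as svm_vcpu_load(): delta = old - new. */
                tsc_offset += old_host_tsc - new_host_tsc;

                uint64_t guest_after = new_host_tsc + tsc_offset;
                printf("guest TSC: before=%llu after=%llu\n",
                       (unsigned long long)guest_before,
                       (unsigned long long)guest_after);
                return 0;
        }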
 
 static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
@@ -646,31 +650,34 @@ static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
 
 static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
-       vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
-       vcpu->regs[VCPU_REGS_RSP] = vcpu->svm->vmcb->save.rsp;
-       vcpu->rip = vcpu->svm->vmcb->save.rip;
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+       vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+       vcpu->rip = svm->vmcb->save.rip;
 }
 
 static void svm_decache_regs(struct kvm_vcpu *vcpu)
 {
-       vcpu->svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
-       vcpu->svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
-       vcpu->svm->vmcb->save.rip = vcpu->rip;
+       struct vcpu_svm *svm = to_svm(vcpu);
+       svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
+       svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
+       svm->vmcb->save.rip = vcpu->rip;
 }
 
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
-       return vcpu->svm->vmcb->save.rflags;
+       return to_svm(vcpu)->vmcb->save.rflags;
 }
 
 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-       vcpu->svm->vmcb->save.rflags = rflags;
+       to_svm(vcpu)->vmcb->save.rflags = rflags;
 }
 
 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
 {
-       struct vmcb_save_area *save = &vcpu->svm->vmcb->save;
+       struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
 
        switch (seg) {
        case VCPU_SREG_CS: return &save->cs;
@@ -722,26 +729,34 @@ static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
 
 static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
-       dt->limit = vcpu->svm->vmcb->save.idtr.limit;
-       dt->base = vcpu->svm->vmcb->save.idtr.base;
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       dt->limit = svm->vmcb->save.idtr.limit;
+       dt->base = svm->vmcb->save.idtr.base;
 }
 
 static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
-       vcpu->svm->vmcb->save.idtr.limit = dt->limit;
-       vcpu->svm->vmcb->save.idtr.base = dt->base ;
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       svm->vmcb->save.idtr.limit = dt->limit;
+       svm->vmcb->save.idtr.base = dt->base ;
 }
 
 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
-       dt->limit = vcpu->svm->vmcb->save.gdtr.limit;
-       dt->base = vcpu->svm->vmcb->save.gdtr.base;
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       dt->limit = svm->vmcb->save.gdtr.limit;
+       dt->base = svm->vmcb->save.gdtr.base;
 }
 
 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
-       vcpu->svm->vmcb->save.gdtr.limit = dt->limit;
-       vcpu->svm->vmcb->save.gdtr.base = dt->base ;
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       svm->vmcb->save.gdtr.limit = dt->limit;
+       svm->vmcb->save.gdtr.base = dt->base ;
 }
 
 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
@@ -750,39 +765,42 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
+       struct vcpu_svm *svm = to_svm(vcpu);
+
 #ifdef CONFIG_X86_64
        if (vcpu->shadow_efer & KVM_EFER_LME) {
-               if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
+               if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
                        vcpu->shadow_efer |= KVM_EFER_LMA;
-                       vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
+                       svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
                }
 
-               if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK) ) {
+               if (is_paging(vcpu) && !(cr0 & X86_CR0_PG) ) {
                        vcpu->shadow_efer &= ~KVM_EFER_LMA;
-                       vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
+                       svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
                }
        }
 #endif
-       if ((vcpu->cr0 & CR0_TS_MASK) && !(cr0 & CR0_TS_MASK)) {
-               vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+       if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
+               svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
                vcpu->fpu_active = 1;
        }
 
        vcpu->cr0 = cr0;
-       cr0 |= CR0_PG_MASK | CR0_WP_MASK;
-       cr0 &= ~(CR0_CD_MASK | CR0_NW_MASK);
-       vcpu->svm->vmcb->save.cr0 = cr0;
+       cr0 |= X86_CR0_PG | X86_CR0_WP;
+       cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+       svm->vmcb->save.cr0 = cr0;
 }
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        vcpu->cr4 = cr4;
-       vcpu->svm->vmcb->save.cr4 = cr4 | CR4_PAE_MASK;
+       to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
 {
+       struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_seg *s = svm_seg(vcpu, seg);
 
        s->base = var->base;
@@ -801,16 +819,16 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
                s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
        }
        if (seg == VCPU_SREG_CS)
-               vcpu->svm->vmcb->save.cpl
-                       = (vcpu->svm->vmcb->save.cs.attrib
+               svm->vmcb->save.cpl
+                       = (svm->vmcb->save.cs.attrib
                           >> SVM_SELECTOR_DPL_SHIFT) & 3;
 
 }
 
 /* FIXME:
 
-       vcpu->svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
-       vcpu->svm->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
+       svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
+       svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
 
 */
 
@@ -822,58 +840,60 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
 static void load_host_msrs(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-       wrmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
+       wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
 #endif
 }
 
 static void save_host_msrs(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-       rdmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
+       rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
 #endif
 }
 
-static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
+static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
 {
        if (svm_data->next_asid > svm_data->max_asid) {
                ++svm_data->asid_generation;
                svm_data->next_asid = 1;
-               vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
+               svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }
 
-       vcpu->cpu = svm_data->cpu;
-       vcpu->svm->asid_generation = svm_data->asid_generation;
-       vcpu->svm->vmcb->control.asid = svm_data->next_asid++;
+       svm->vcpu.cpu = svm_data->cpu;
+       svm->asid_generation = svm_data->asid_generation;
+       svm->vmcb->control.asid = svm_data->next_asid++;
 }
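new_asid() hands out ASIDs from a per-CPU counter: when the counter runs past max_asid it bumps the generation, restarts at 1 (ASID 0 is reserved for the host) and requests a full ASID flush; pre_svm_run() later compares the vcpu's cached generation against the CPU's to decide whether a fresh ASID is needed. A toy model of that recycling, assuming a CPU with only three ASIDs:

        /* Minimal model of the per-CPU ASID recycling in new_asid() and
         * pre_svm_run().  Userspace illustration only. */
        #include <stdio.h>

        struct cpu_asid {
                unsigned max_asid;      /* from CPUID on real hardware */
                unsigned next_asid;
                unsigned generation;
        };

        struct vcpu_state {
                unsigned asid;
                unsigned asid_generation;
        };

        static void assign_asid(struct vcpu_state *v, struct cpu_asid *c)
        {
                if (c->next_asid > c->max_asid) {
                        ++c->generation;        /* real code also flushes all ASIDs */
                        c->next_asid = 1;       /* ASID 0 stays with the host */
                }
                v->asid_generation = c->generation;
                v->asid = c->next_asid++;
        }

        int main(void)
        {
                struct cpu_asid cpu = { .max_asid = 3, .next_asid = 1, .generation = 1 };
                struct vcpu_state vcpus[5] = {{0, 0}};
                int i;

                /* Five vcpus taking turns on a CPU with three ASIDs: the fourth
                 * assignment wraps, bumps the generation, and invalidates the
                 * ASIDs handed out before it. */
                for (i = 0; i < 5; i++) {
                        assign_asid(&vcpus[i], &cpu);
                        printf("vcpu %d: asid=%u gen=%u\n", i,
                               vcpus[i].asid, vcpus[i].asid_generation);
                }
                return 0;
        }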
 
 static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 {
-       invlpga(address, vcpu->svm->vmcb->control.asid); // is needed?
+       invlpga(address, to_svm(vcpu)->vmcb->control.asid); // is needed?
 }
 
 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
 {
-       return vcpu->svm->db_regs[dr];
+       return to_svm(vcpu)->db_regs[dr];
 }
 
 static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception)
 {
+       struct vcpu_svm *svm = to_svm(vcpu);
+
        *exception = 0;
 
-       if (vcpu->svm->vmcb->save.dr7 & DR7_GD_MASK) {
-               vcpu->svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
-               vcpu->svm->vmcb->save.dr6 |= DR6_BD_MASK;
+       if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
+               svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
+               svm->vmcb->save.dr6 |= DR6_BD_MASK;
                *exception = DB_VECTOR;
                return;
        }
 
        switch (dr) {
        case 0 ... 3:
-               vcpu->svm->db_regs[dr] = value;
+               svm->db_regs[dr] = value;
                return;
        case 4 ... 5:
-               if (vcpu->cr4 & CR4_DE_MASK) {
+               if (vcpu->cr4 & X86_CR4_DE) {
                        *exception = UD_VECTOR;
                        return;
                }
@@ -882,7 +902,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                        *exception = GP_VECTOR;
                        return;
                }
-               vcpu->svm->vmcb->save.dr7 = value;
+               svm->vmcb->save.dr7 = value;
                return;
        }
        default:
@@ -893,42 +913,44 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
        }
 }
 
-static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       u32 exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
+       u32 exit_int_info = svm->vmcb->control.exit_int_info;
+       struct kvm *kvm = svm->vcpu.kvm;
        u64 fault_address;
        u32 error_code;
        enum emulation_result er;
        int r;
 
-       if (is_external_interrupt(exit_int_info))
-               push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+       if (!irqchip_in_kernel(kvm) &&
+               is_external_interrupt(exit_int_info))
+               push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
 
-       spin_lock(&vcpu->kvm->lock);
+       mutex_lock(&kvm->lock);
 
-       fault_address  = vcpu->svm->vmcb->control.exit_info_2;
-       error_code = vcpu->svm->vmcb->control.exit_info_1;
-       r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
+       fault_address  = svm->vmcb->control.exit_info_2;
+       error_code = svm->vmcb->control.exit_info_1;
+       r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
        if (r < 0) {
-               spin_unlock(&vcpu->kvm->lock);
+               mutex_unlock(&kvm->lock);
                return r;
        }
        if (!r) {
-               spin_unlock(&vcpu->kvm->lock);
+               mutex_unlock(&kvm->lock);
                return 1;
        }
-       er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
-       spin_unlock(&vcpu->kvm->lock);
+       er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
+                                error_code);
+       mutex_unlock(&kvm->lock);
 
        switch (er) {
        case EMULATE_DONE:
                return 1;
        case EMULATE_DO_MMIO:
-               ++vcpu->stat.mmio_exits;
-               kvm_run->exit_reason = KVM_EXIT_MMIO;
+               ++svm->vcpu.stat.mmio_exits;
                return 0;
        case EMULATE_FAIL:
-               vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
+               vcpu_printf(&svm->vcpu, "%s: emulate fail\n", __FUNCTION__);
                break;
        default:
                BUG();
@@ -938,257 +960,142 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 0;
 }
 
-static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-       if (!(vcpu->cr0 & CR0_TS_MASK))
-               vcpu->svm->vmcb->save.cr0 &= ~CR0_TS_MASK;
-       vcpu->fpu_active = 1;
+       svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+       if (!(svm->vcpu.cr0 & X86_CR0_TS))
+               svm->vmcb->save.cr0 &= ~X86_CR0_TS;
+       svm->vcpu.fpu_active = 1;
 
-       return 1;
+       return 1;
 }
 
-static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
        /*
         * VMCB is undefined after a SHUTDOWN intercept
         * so reinitialize it.
         */
-       memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
-       init_vmcb(vcpu->svm->vmcb);
+       clear_page(svm->vmcb);
+       init_vmcb(svm->vmcb);
 
        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
 }
 
-static int io_get_override(struct kvm_vcpu *vcpu,
-                         struct vmcb_seg **seg,
-                         int *addr_override)
-{
-       u8 inst[MAX_INST_SIZE];
-       unsigned ins_length;
-       gva_t rip;
-       int i;
-
-       rip =  vcpu->svm->vmcb->save.rip;
-       ins_length = vcpu->svm->next_rip - rip;
-       rip += vcpu->svm->vmcb->save.cs.base;
-
-       if (ins_length > MAX_INST_SIZE)
-               printk(KERN_DEBUG
-                      "%s: inst length err, cs base 0x%llx rip 0x%llx "
-                      "next rip 0x%llx ins_length %u\n",
-                      __FUNCTION__,
-                      vcpu->svm->vmcb->save.cs.base,
-                      vcpu->svm->vmcb->save.rip,
-                      vcpu->svm->vmcb->control.exit_info_2,
-                      ins_length);
-
-       if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
-               /* #PF */
-               return 0;
-
-       *addr_override = 0;
-       *seg = NULL;
-       for (i = 0; i < ins_length; i++)
-               switch (inst[i]) {
-               case 0xf0:
-               case 0xf2:
-               case 0xf3:
-               case 0x66:
-                       continue;
-               case 0x67:
-                       *addr_override = 1;
-                       continue;
-               case 0x2e:
-                       *seg = &vcpu->svm->vmcb->save.cs;
-                       continue;
-               case 0x36:
-                       *seg = &vcpu->svm->vmcb->save.ss;
-                       continue;
-               case 0x3e:
-                       *seg = &vcpu->svm->vmcb->save.ds;
-                       continue;
-               case 0x26:
-                       *seg = &vcpu->svm->vmcb->save.es;
-                       continue;
-               case 0x64:
-                       *seg = &vcpu->svm->vmcb->save.fs;
-                       continue;
-               case 0x65:
-                       *seg = &vcpu->svm->vmcb->save.gs;
-                       continue;
-               default:
-                       return 1;
-               }
-       printk(KERN_DEBUG "%s: unexpected\n", __FUNCTION__);
-       return 0;
-}
-
-static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
+static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       unsigned long addr_mask;
-       unsigned long *reg;
-       struct vmcb_seg *seg;
-       int addr_override;
-       struct vmcb_save_area *save_area = &vcpu->svm->vmcb->save;
-       u16 cs_attrib = save_area->cs.attrib;
-       unsigned addr_size = get_addr_size(vcpu);
-
-       if (!io_get_override(vcpu, &seg, &addr_override))
-               return 0;
-
-       if (addr_override)
-               addr_size = (addr_size == 2) ? 4: (addr_size >> 1);
+       u32 io_info = svm->vmcb->control.exit_info_1; //address size bug?
+       int size, down, in, string, rep;
+       unsigned port;
 
-       if (ins) {
-               reg = &vcpu->regs[VCPU_REGS_RDI];
-               seg = &vcpu->svm->vmcb->save.es;
-       } else {
-               reg = &vcpu->regs[VCPU_REGS_RSI];
-               seg = (seg) ? seg : &vcpu->svm->vmcb->save.ds;
-       }
+       ++svm->vcpu.stat.io_exits;
 
-       addr_mask = ~0ULL >> (64 - (addr_size * 8));
+       svm->next_rip = svm->vmcb->control.exit_info_2;
 
-       if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
-           !(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
-               *address = (*reg & addr_mask);
-               return addr_mask;
-       }
+       string = (io_info & SVM_IOIO_STR_MASK) != 0;
 
-       if (!(seg->attrib & SVM_SELECTOR_P_SHIFT)) {
-               svm_inject_gp(vcpu, 0);
-               return 0;
+       if (string) {
+               if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
+                       return 0;
+               return 1;
        }
 
-       *address = (*reg & addr_mask) + seg->base;
-       return addr_mask;
-}
-
-static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-       u32 io_info = vcpu->svm->vmcb->control.exit_info_1; //address size bug?
-       int size, down, in, string, rep;
-       unsigned port;
-       unsigned long count;
-       gva_t address = 0;
-
-       ++vcpu->stat.io_exits;
-
-       vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;
-
        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
-       string = (io_info & SVM_IOIO_STR_MASK) != 0;
        rep = (io_info & SVM_IOIO_REP_MASK) != 0;
-       count = 1;
-       down = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
-
-       if (string) {
-               unsigned addr_mask;
-
-               addr_mask = io_adress(vcpu, in, &address);
-               if (!addr_mask) {
-                       printk(KERN_DEBUG "%s: get io address failed\n",
-                              __FUNCTION__);
-                       return 1;
-               }
+       down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
 
-               if (rep)
-                       count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
-       }
-       return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
-                            address, rep, port);
+       return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
 }
 
-static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
        return 1;
 }
 
-static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
-       skip_emulated_instruction(vcpu);
-       if (vcpu->irq_summary)
-               return 1;
-
-       kvm_run->exit_reason = KVM_EXIT_HLT;
-       ++vcpu->stat.halt_exits;
-       return 0;
+       svm->next_rip = svm->vmcb->save.rip + 1;
+       skip_emulated_instruction(&svm->vcpu);
+       return kvm_emulate_halt(&svm->vcpu);
 }
 
-static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 3;
-       skip_emulated_instruction(vcpu);
-       return kvm_hypercall(vcpu, kvm_run);
+       svm->next_rip = svm->vmcb->save.rip + 3;
+       skip_emulated_instruction(&svm->vcpu);
+       return kvm_hypercall(&svm->vcpu, kvm_run);
 }
 
-static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int invalid_op_interception(struct vcpu_svm *svm,
+                                  struct kvm_run *kvm_run)
 {
-       inject_ud(vcpu);
+       inject_ud(&svm->vcpu);
        return 1;
 }
 
-static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int task_switch_interception(struct vcpu_svm *svm,
+                                   struct kvm_run *kvm_run)
 {
-       printk(KERN_DEBUG "%s: task swiche is unsupported\n", __FUNCTION__);
+       pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
        kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
        return 0;
 }
 
-static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
-       kvm_emulate_cpuid(vcpu);
+       svm->next_rip = svm->vmcb->save.rip + 2;
+       kvm_emulate_cpuid(&svm->vcpu);
        return 1;
 }
 
-static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int emulate_on_interception(struct vcpu_svm *svm,
+                                  struct kvm_run *kvm_run)
 {
-       if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
-               printk(KERN_ERR "%s: failed\n", __FUNCTION__);
+       if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE)
+               pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
        return 1;
 }
 
 static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 {
+       struct vcpu_svm *svm = to_svm(vcpu);
+
        switch (ecx) {
        case MSR_IA32_TIME_STAMP_COUNTER: {
                u64 tsc;
 
                rdtscll(tsc);
-               *data = vcpu->svm->vmcb->control.tsc_offset + tsc;
+               *data = svm->vmcb->control.tsc_offset + tsc;
                break;
        }
        case MSR_K6_STAR:
-               *data = vcpu->svm->vmcb->save.star;
+               *data = svm->vmcb->save.star;
                break;
 #ifdef CONFIG_X86_64
        case MSR_LSTAR:
-               *data = vcpu->svm->vmcb->save.lstar;
+               *data = svm->vmcb->save.lstar;
                break;
        case MSR_CSTAR:
-               *data = vcpu->svm->vmcb->save.cstar;
+               *data = svm->vmcb->save.cstar;
                break;
        case MSR_KERNEL_GS_BASE:
-               *data = vcpu->svm->vmcb->save.kernel_gs_base;
+               *data = svm->vmcb->save.kernel_gs_base;
                break;
        case MSR_SYSCALL_MASK:
-               *data = vcpu->svm->vmcb->save.sfmask;
+               *data = svm->vmcb->save.sfmask;
                break;
 #endif
        case MSR_IA32_SYSENTER_CS:
-               *data = vcpu->svm->vmcb->save.sysenter_cs;
+               *data = svm->vmcb->save.sysenter_cs;
                break;
        case MSR_IA32_SYSENTER_EIP:
-               *data = vcpu->svm->vmcb->save.sysenter_eip;
+               *data = svm->vmcb->save.sysenter_eip;
                break;
        case MSR_IA32_SYSENTER_ESP:
-               *data = vcpu->svm->vmcb->save.sysenter_esp;
+               *data = svm->vmcb->save.sysenter_esp;
                break;
        default:
                return kvm_get_msr_common(vcpu, ecx, data);
@@ -1196,57 +1103,59 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
        return 0;
 }
 
-static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+       u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
        u64 data;
 
-       if (svm_get_msr(vcpu, ecx, &data))
-               svm_inject_gp(vcpu, 0);
+       if (svm_get_msr(&svm->vcpu, ecx, &data))
+               svm_inject_gp(&svm->vcpu, 0);
        else {
-               vcpu->svm->vmcb->save.rax = data & 0xffffffff;
-               vcpu->regs[VCPU_REGS_RDX] = data >> 32;
-               vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
-               skip_emulated_instruction(vcpu);
+               svm->vmcb->save.rax = data & 0xffffffff;
+               svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
+               svm->next_rip = svm->vmcb->save.rip + 2;
+               skip_emulated_instruction(&svm->vcpu);
        }
        return 1;
 }
 
 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 {
+       struct vcpu_svm *svm = to_svm(vcpu);
+
        switch (ecx) {
        case MSR_IA32_TIME_STAMP_COUNTER: {
                u64 tsc;
 
                rdtscll(tsc);
-               vcpu->svm->vmcb->control.tsc_offset = data - tsc;
+               svm->vmcb->control.tsc_offset = data - tsc;
                break;
        }
        case MSR_K6_STAR:
-               vcpu->svm->vmcb->save.star = data;
+               svm->vmcb->save.star = data;
                break;
 #ifdef CONFIG_X86_64
        case MSR_LSTAR:
-               vcpu->svm->vmcb->save.lstar = data;
+               svm->vmcb->save.lstar = data;
                break;
        case MSR_CSTAR:
-               vcpu->svm->vmcb->save.cstar = data;
+               svm->vmcb->save.cstar = data;
                break;
        case MSR_KERNEL_GS_BASE:
-               vcpu->svm->vmcb->save.kernel_gs_base = data;
+               svm->vmcb->save.kernel_gs_base = data;
                break;
        case MSR_SYSCALL_MASK:
-               vcpu->svm->vmcb->save.sfmask = data;
+               svm->vmcb->save.sfmask = data;
                break;
 #endif
        case MSR_IA32_SYSENTER_CS:
-               vcpu->svm->vmcb->save.sysenter_cs = data;
+               svm->vmcb->save.sysenter_cs = data;
                break;
        case MSR_IA32_SYSENTER_EIP:
-               vcpu->svm->vmcb->save.sysenter_eip = data;
+               svm->vmcb->save.sysenter_eip = data;
                break;
        case MSR_IA32_SYSENTER_ESP:
-               vcpu->svm->vmcb->save.sysenter_esp = data;
+               svm->vmcb->save.sysenter_esp = data;
                break;
        default:
                return kvm_set_msr_common(vcpu, ecx, data);
@@ -1254,37 +1163,39 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
        return 0;
 }
 
-static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       u32 ecx = vcpu->regs[VCPU_REGS_RCX];
-       u64 data = (vcpu->svm->vmcb->save.rax & -1u)
-               | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
-       vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
-       if (svm_set_msr(vcpu, ecx, data))
-               svm_inject_gp(vcpu, 0);
+       u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
+       u64 data = (svm->vmcb->save.rax & -1u)
+               | ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
+       svm->next_rip = svm->vmcb->save.rip + 2;
+       if (svm_set_msr(&svm->vcpu, ecx, data))
+               svm_inject_gp(&svm->vcpu, 0);
        else
-               skip_emulated_instruction(vcpu);
+               skip_emulated_instruction(&svm->vcpu);
        return 1;
 }
 
-static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       if (vcpu->svm->vmcb->control.exit_info_1)
-               return wrmsr_interception(vcpu, kvm_run);
+       if (svm->vmcb->control.exit_info_1)
+               return wrmsr_interception(svm, kvm_run);
        else
-               return rdmsr_interception(vcpu, kvm_run);
+               return rdmsr_interception(svm, kvm_run);
 }
 
-static int interrupt_window_interception(struct kvm_vcpu *vcpu,
+static int interrupt_window_interception(struct vcpu_svm *svm,
                                   struct kvm_run *kvm_run)
 {
+       svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
+       svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
        /*
         * If the user space waits to inject interrupts, exit as soon as
         * possible
         */
        if (kvm_run->request_interrupt_window &&
-           !vcpu->irq_summary) {
-               ++vcpu->stat.irq_window_exits;
+           !svm->vcpu.irq_summary) {
+               ++svm->vcpu.stat.irq_window_exits;
                kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                return 0;
        }
@@ -1292,7 +1203,7 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu,
        return 1;
 }
 
-static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
+static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
                                      struct kvm_run *kvm_run) = {
        [SVM_EXIT_READ_CR0]                     = emulate_on_interception,
        [SVM_EXIT_READ_CR3]                     = emulate_on_interception,
@@ -1339,15 +1250,15 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
 };
 
 
-static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_exit(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       u32 exit_code = vcpu->svm->vmcb->control.exit_code;
+       u32 exit_code = svm->vmcb->control.exit_code;
 
-       if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) &&
+       if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
            exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
                printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
                       "exit_code 0x%x\n",
-                      __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
+                      __FUNCTION__, svm->vmcb->control.exit_int_info,
                       exit_code);
 
        if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
@@ -1357,7 +1268,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                return 0;
        }
 
-       return svm_exit_handlers[exit_code](vcpu, kvm_run);
+       return svm_exit_handlers[exit_code](svm, kvm_run);
 }
 
 static void reload_tss(struct kvm_vcpu *vcpu)
@@ -1369,76 +1280,124 @@ static void reload_tss(struct kvm_vcpu *vcpu)
        load_TR_desc();
 }
 
-static void pre_svm_run(struct kvm_vcpu *vcpu)
+static void pre_svm_run(struct vcpu_svm *svm)
 {
        int cpu = raw_smp_processor_id();
 
        struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
 
-       vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
-       if (vcpu->cpu != cpu ||
-           vcpu->svm->asid_generation != svm_data->asid_generation)
-               new_asid(vcpu, svm_data);
+       svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+       if (svm->vcpu.cpu != cpu ||
+           svm->asid_generation != svm_data->asid_generation)
+               new_asid(svm, svm_data);
 }
 
 
-static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
+static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 {
        struct vmcb_control_area *control;
 
-       control = &vcpu->svm->vmcb->control;
-       control->int_vector = pop_irq(vcpu);
+       control = &svm->vmcb->control;
+       control->int_vector = irq;
        control->int_ctl &= ~V_INTR_PRIO_MASK;
        control->int_ctl |= V_IRQ_MASK |
                ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
 }
 
-static void kvm_reput_irq(struct kvm_vcpu *vcpu)
+static void svm_intr_assist(struct vcpu_svm *svm)
 {
-       struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
+       struct vmcb *vmcb = svm->vmcb;
+       int intr_vector = -1;
 
-       if (control->int_ctl & V_IRQ_MASK) {
+       if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
+           ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
+               intr_vector = vmcb->control.exit_int_info &
+                             SVM_EVTINJ_VEC_MASK;
+               vmcb->control.exit_int_info = 0;
+               svm_inject_irq(svm, intr_vector);
+               return;
+       }
+
+       if (vmcb->control.int_ctl & V_IRQ_MASK)
+               return;
+
+       if (!kvm_cpu_has_interrupt(&svm->vcpu))
+               return;
+
+       if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
+           (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
+           (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
+               /* unable to deliver irq, set pending irq */
+               vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
+               svm_inject_irq(svm, 0x0);
+               return;
+       }
+       /* Okay, we can deliver the interrupt: grab it and update PIC state. */
+       intr_vector = kvm_cpu_get_interrupt(&svm->vcpu);
+       svm_inject_irq(svm, intr_vector);
+}
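When the PIC is emulated in the kernel, svm_intr_assist() decides before each VMRUN whether to re-inject an interrupted event, inject a fresh vector pulled from the PIC, or arm the VINTR intercept and wait for an interrupt window. A condensed model of that decision, with plain flags standing in for the vmcb fields and for kvm_cpu_has_interrupt()/kvm_cpu_get_interrupt():

        /* Condensed model of the choice svm_intr_assist() makes each run.
         * The flags are stand-ins for the real vmcb fields. */
        #include <stdio.h>

        enum action { REINJECT, NOTHING, OPEN_WINDOW, INJECT };

        static enum action intr_assist(int pending_exit_intr, int virq_pending,
                                       int pic_has_irq, int if_flag,
                                       int intr_shadow, int event_inj_valid)
        {
                if (pending_exit_intr)          /* interrupted delivery: re-inject it */
                        return REINJECT;
                if (virq_pending)               /* a virtual IRQ is already queued */
                        return NOTHING;
                if (!pic_has_irq)               /* nothing to deliver */
                        return NOTHING;
                if (!if_flag || intr_shadow || event_inj_valid)
                        return OPEN_WINDOW;     /* arm VINTR, wait for a window */
                return INJECT;                  /* pull the vector from the PIC now */
        }

        int main(void)
        {
                /* IF clear, PIC has an interrupt pending -> open a window (2). */
                printf("%d\n", intr_assist(0, 0, 1, 0, 0, 0));
                return 0;
        }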
+
+static void kvm_reput_irq(struct vcpu_svm *svm)
+{
+       struct kvm_vcpu *vcpu = &svm->vcpu;
+       struct vmcb_control_area *control = &svm->vmcb->control;
+
+       if ((control->int_ctl & V_IRQ_MASK) && !irqchip_in_kernel(vcpu->kvm)) {
                control->int_ctl &= ~V_IRQ_MASK;
-               push_irq(vcpu, control->int_vector);
+               push_irq(&svm->vcpu, control->int_vector);
        }
 
-       vcpu->interrupt_window_open =
+       svm->vcpu.interrupt_window_open =
                !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
 }
 
-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+static void svm_do_inject_vector(struct vcpu_svm *svm)
+{
+       struct kvm_vcpu *vcpu = &svm->vcpu;
+       int word_index = __ffs(vcpu->irq_summary);
+       int bit_index = __ffs(vcpu->irq_pending[word_index]);
+       int irq = word_index * BITS_PER_LONG + bit_index;
+
+       clear_bit(bit_index, &vcpu->irq_pending[word_index]);
+       if (!vcpu->irq_pending[word_index])
+               clear_bit(word_index, &vcpu->irq_summary);
+       svm_inject_irq(svm, irq);
+}
+
+static void do_interrupt_requests(struct vcpu_svm *svm,
                                       struct kvm_run *kvm_run)
 {
-       struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
+       struct vmcb_control_area *control = &svm->vmcb->control;
 
-       vcpu->interrupt_window_open =
+       svm->vcpu.interrupt_window_open =
                (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
-                (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
+                (svm->vmcb->save.rflags & X86_EFLAGS_IF));
 
-       if (vcpu->interrupt_window_open && vcpu->irq_summary)
+       if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
                /*
                 * If interrupts enabled, and not blocked by sti or mov ss. Good.
                 */
-               kvm_do_inject_irq(vcpu);
+               svm_do_inject_vector(svm);
 
        /*
         * Interrupts blocked.  Wait for unblock.
         */
-       if (!vcpu->interrupt_window_open &&
-           (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
+       if (!svm->vcpu.interrupt_window_open &&
+           (svm->vcpu.irq_summary || kvm_run->request_interrupt_window)) {
                control->intercept |= 1ULL << INTERCEPT_VINTR;
        } else
                control->intercept &= ~(1ULL << INTERCEPT_VINTR);
 }
 
-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+static void post_kvm_run_save(struct vcpu_svm *svm,
                              struct kvm_run *kvm_run)
 {
-       kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
-                                                 vcpu->irq_summary == 0);
-       kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
-       kvm_run->cr8 = vcpu->cr8;
-       kvm_run->apic_base = vcpu->apic_base;
+       kvm_run->ready_for_interrupt_injection
+               = (svm->vcpu.interrupt_window_open &&
+                  svm->vcpu.irq_summary == 0);
+       kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
+       kvm_run->cr8 = svm->vcpu.cr8;
+       kvm_run->apic_base = svm->vcpu.apic_base;
 }
 
 /*
@@ -1447,13 +1406,13 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
  *
  * No need to exit to userspace if we already have an interrupt queued.
  */
-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+static int dm_request_for_irq_injection(struct vcpu_svm *svm,
                                          struct kvm_run *kvm_run)
 {
-       return (!vcpu->irq_summary &&
+       return (!svm->vcpu.irq_summary &&
                kvm_run->request_interrupt_window &&
-               vcpu->interrupt_window_open &&
-               (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
+               svm->vcpu.interrupt_window_open &&
+               (svm->vmcb->save.rflags & X86_EFLAGS_IF));
 }
 
 static void save_db_regs(unsigned long *db_regs)
@@ -1472,39 +1431,64 @@ static void load_db_regs(unsigned long *db_regs)
        asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
 }
 
+static void svm_flush_tlb(struct kvm_vcpu *vcpu)
+{
+       force_new_asid(vcpu);
+}
+
 static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+       struct vcpu_svm *svm = to_svm(vcpu);
        u16 fs_selector;
        u16 gs_selector;
        u16 ldt_selector;
        int r;
 
 again:
-       if (!vcpu->mmio_read_completed)
-               do_interrupt_requests(vcpu, kvm_run);
+       r = kvm_mmu_reload(vcpu);
+       if (unlikely(r))
+               return r;
 
        clgi();
 
-       pre_svm_run(vcpu);
+       if (signal_pending(current)) {
+               stgi();
+               ++vcpu->stat.signal_exits;
+               post_kvm_run_save(svm, kvm_run);
+               kvm_run->exit_reason = KVM_EXIT_INTR;
+               return -EINTR;
+       }
+
+       if (irqchip_in_kernel(vcpu->kvm))
+               svm_intr_assist(svm);
+       else if (!vcpu->mmio_read_completed)
+               do_interrupt_requests(svm, kvm_run);
+
+       vcpu->guest_mode = 1;
+       if (vcpu->requests)
+               if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+                   svm_flush_tlb(vcpu);
+
+       pre_svm_run(svm);
 
        save_host_msrs(vcpu);
        fs_selector = read_fs();
        gs_selector = read_gs();
        ldt_selector = read_ldt();
-       vcpu->svm->host_cr2 = kvm_read_cr2();
-       vcpu->svm->host_dr6 = read_dr6();
-       vcpu->svm->host_dr7 = read_dr7();
-       vcpu->svm->vmcb->save.cr2 = vcpu->cr2;
+       svm->host_cr2 = kvm_read_cr2();
+       svm->host_dr6 = read_dr6();
+       svm->host_dr7 = read_dr7();
+       svm->vmcb->save.cr2 = vcpu->cr2;
 
-       if (vcpu->svm->vmcb->save.dr7 & 0xff) {
+       if (svm->vmcb->save.dr7 & 0xff) {
                write_dr7(0);
-               save_db_regs(vcpu->svm->host_db_regs);
-               load_db_regs(vcpu->svm->db_regs);
+               save_db_regs(svm->host_db_regs);
+               load_db_regs(svm->db_regs);
        }
 
        if (vcpu->fpu_active) {
-               fx_save(vcpu->host_fx_image);
-               fx_restore(vcpu->guest_fx_image);
+               fx_save(&vcpu->host_fx_image);
+               fx_restore(&vcpu->guest_fx_image);
        }
 
        asm volatile (
@@ -1519,34 +1503,33 @@ again:
 #endif
 
 #ifdef CONFIG_X86_64
-               "mov %c[rbx](%[vcpu]), %%rbx \n\t"
-               "mov %c[rcx](%[vcpu]), %%rcx \n\t"
-               "mov %c[rdx](%[vcpu]), %%rdx \n\t"
-               "mov %c[rsi](%[vcpu]), %%rsi \n\t"
-               "mov %c[rdi](%[vcpu]), %%rdi \n\t"
-               "mov %c[rbp](%[vcpu]), %%rbp \n\t"
-               "mov %c[r8](%[vcpu]),  %%r8  \n\t"
-               "mov %c[r9](%[vcpu]),  %%r9  \n\t"
-               "mov %c[r10](%[vcpu]), %%r10 \n\t"
-               "mov %c[r11](%[vcpu]), %%r11 \n\t"
-               "mov %c[r12](%[vcpu]), %%r12 \n\t"
-               "mov %c[r13](%[vcpu]), %%r13 \n\t"
-               "mov %c[r14](%[vcpu]), %%r14 \n\t"
-               "mov %c[r15](%[vcpu]), %%r15 \n\t"
+               "mov %c[rbx](%[svm]), %%rbx \n\t"
+               "mov %c[rcx](%[svm]), %%rcx \n\t"
+               "mov %c[rdx](%[svm]), %%rdx \n\t"
+               "mov %c[rsi](%[svm]), %%rsi \n\t"
+               "mov %c[rdi](%[svm]), %%rdi \n\t"
+               "mov %c[rbp](%[svm]), %%rbp \n\t"
+               "mov %c[r8](%[svm]),  %%r8  \n\t"
+               "mov %c[r9](%[svm]),  %%r9  \n\t"
+               "mov %c[r10](%[svm]), %%r10 \n\t"
+               "mov %c[r11](%[svm]), %%r11 \n\t"
+               "mov %c[r12](%[svm]), %%r12 \n\t"
+               "mov %c[r13](%[svm]), %%r13 \n\t"
+               "mov %c[r14](%[svm]), %%r14 \n\t"
+               "mov %c[r15](%[svm]), %%r15 \n\t"
 #else
-               "mov %c[rbx](%[vcpu]), %%ebx \n\t"
-               "mov %c[rcx](%[vcpu]), %%ecx \n\t"
-               "mov %c[rdx](%[vcpu]), %%edx \n\t"
-               "mov %c[rsi](%[vcpu]), %%esi \n\t"
-               "mov %c[rdi](%[vcpu]), %%edi \n\t"
-               "mov %c[rbp](%[vcpu]), %%ebp \n\t"
+               "mov %c[rbx](%[svm]), %%ebx \n\t"
+               "mov %c[rcx](%[svm]), %%ecx \n\t"
+               "mov %c[rdx](%[svm]), %%edx \n\t"
+               "mov %c[rsi](%[svm]), %%esi \n\t"
+               "mov %c[rdi](%[svm]), %%edi \n\t"
+               "mov %c[rbp](%[svm]), %%ebp \n\t"
 #endif
 
 #ifdef CONFIG_X86_64
                /* Enter guest mode */
                "push %%rax \n\t"
-               "mov %c[svm](%[vcpu]), %%rax \n\t"
-               "mov %c[vmcb](%%rax), %%rax \n\t"
+               "mov %c[vmcb](%[svm]), %%rax \n\t"
                SVM_VMLOAD "\n\t"
                SVM_VMRUN "\n\t"
                SVM_VMSAVE "\n\t"
@@ -1554,8 +1537,7 @@ again:
 #else
                /* Enter guest mode */
                "push %%eax \n\t"
-               "mov %c[svm](%[vcpu]), %%eax \n\t"
-               "mov %c[vmcb](%%eax), %%eax \n\t"
+               "mov %c[vmcb](%[svm]), %%eax \n\t"
                SVM_VMLOAD "\n\t"
                SVM_VMRUN "\n\t"
                SVM_VMSAVE "\n\t"
@@ -1564,71 +1546,72 @@ again:
 
                /* Save guest registers, load host registers */
 #ifdef CONFIG_X86_64
-               "mov %%rbx, %c[rbx](%[vcpu]) \n\t"
-               "mov %%rcx, %c[rcx](%[vcpu]) \n\t"
-               "mov %%rdx, %c[rdx](%[vcpu]) \n\t"
-               "mov %%rsi, %c[rsi](%[vcpu]) \n\t"
-               "mov %%rdi, %c[rdi](%[vcpu]) \n\t"
-               "mov %%rbp, %c[rbp](%[vcpu]) \n\t"
-               "mov %%r8,  %c[r8](%[vcpu]) \n\t"
-               "mov %%r9,  %c[r9](%[vcpu]) \n\t"
-               "mov %%r10, %c[r10](%[vcpu]) \n\t"
-               "mov %%r11, %c[r11](%[vcpu]) \n\t"
-               "mov %%r12, %c[r12](%[vcpu]) \n\t"
-               "mov %%r13, %c[r13](%[vcpu]) \n\t"
-               "mov %%r14, %c[r14](%[vcpu]) \n\t"
-               "mov %%r15, %c[r15](%[vcpu]) \n\t"
+               "mov %%rbx, %c[rbx](%[svm]) \n\t"
+               "mov %%rcx, %c[rcx](%[svm]) \n\t"
+               "mov %%rdx, %c[rdx](%[svm]) \n\t"
+               "mov %%rsi, %c[rsi](%[svm]) \n\t"
+               "mov %%rdi, %c[rdi](%[svm]) \n\t"
+               "mov %%rbp, %c[rbp](%[svm]) \n\t"
+               "mov %%r8,  %c[r8](%[svm]) \n\t"
+               "mov %%r9,  %c[r9](%[svm]) \n\t"
+               "mov %%r10, %c[r10](%[svm]) \n\t"
+               "mov %%r11, %c[r11](%[svm]) \n\t"
+               "mov %%r12, %c[r12](%[svm]) \n\t"
+               "mov %%r13, %c[r13](%[svm]) \n\t"
+               "mov %%r14, %c[r14](%[svm]) \n\t"
+               "mov %%r15, %c[r15](%[svm]) \n\t"
 
                "pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
                "pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
                "pop  %%rbp; pop  %%rdi; pop  %%rsi;"
                "pop  %%rdx; pop  %%rcx; pop  %%rbx; \n\t"
 #else
-               "mov %%ebx, %c[rbx](%[vcpu]) \n\t"
-               "mov %%ecx, %c[rcx](%[vcpu]) \n\t"
-               "mov %%edx, %c[rdx](%[vcpu]) \n\t"
-               "mov %%esi, %c[rsi](%[vcpu]) \n\t"
-               "mov %%edi, %c[rdi](%[vcpu]) \n\t"
-               "mov %%ebp, %c[rbp](%[vcpu]) \n\t"
+               "mov %%ebx, %c[rbx](%[svm]) \n\t"
+               "mov %%ecx, %c[rcx](%[svm]) \n\t"
+               "mov %%edx, %c[rdx](%[svm]) \n\t"
+               "mov %%esi, %c[rsi](%[svm]) \n\t"
+               "mov %%edi, %c[rdi](%[svm]) \n\t"
+               "mov %%ebp, %c[rbp](%[svm]) \n\t"
 
                "pop  %%ebp; pop  %%edi; pop  %%esi;"
                "pop  %%edx; pop  %%ecx; pop  %%ebx; \n\t"
 #endif
                :
-               : [vcpu]"a"(vcpu),
-                 [svm]"i"(offsetof(struct kvm_vcpu, svm)),
+               : [svm]"a"(svm),
                  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
-                 [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
-                 [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
-                 [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
-                 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
-                 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
-                 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
+                 [rbx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBX])),
+                 [rcx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RCX])),
+                 [rdx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDX])),
+                 [rsi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RSI])),
+                 [rdi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDI])),
+                 [rbp]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBP]))
 #ifdef CONFIG_X86_64
-                 ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
-                 [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
-                 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
-                 [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
-                 [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
-                 [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
-                 [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
-                 [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15]))
+                 ,[r8 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R8])),
+                 [r9 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R9 ])),
+                 [r10]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R10])),
+                 [r11]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R11])),
+                 [r12]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R12])),
+                 [r13]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R13])),
+                 [r14]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R14])),
+                 [r15]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R15]))
 #endif
                : "cc", "memory" );
 
+       vcpu->guest_mode = 0;
+
        if (vcpu->fpu_active) {
-               fx_save(vcpu->guest_fx_image);
-               fx_restore(vcpu->host_fx_image);
+               fx_save(&vcpu->guest_fx_image);
+               fx_restore(&vcpu->host_fx_image);
        }
 
-       if ((vcpu->svm->vmcb->save.dr7 & 0xff))
-               load_db_regs(vcpu->svm->host_db_regs);
+       if ((svm->vmcb->save.dr7 & 0xff))
+               load_db_regs(svm->host_db_regs);
 
-       vcpu->cr2 = vcpu->svm->vmcb->save.cr2;
+       vcpu->cr2 = svm->vmcb->save.cr2;
 
-       write_dr6(vcpu->svm->host_dr6);
-       write_dr7(vcpu->svm->host_dr7);
-       kvm_write_cr2(vcpu->svm->host_cr2);
+       write_dr6(svm->host_dr6);
+       write_dr7(svm->host_dr7);
+       kvm_write_cr2(svm->host_cr2);
 
        load_fs(fs_selector);
        load_gs(gs_selector);
@@ -1642,57 +1625,47 @@ again:
         */
        if (unlikely(prof_on == KVM_PROFILING))
                profile_hit(KVM_PROFILING,
-                       (void *)(unsigned long)vcpu->svm->vmcb->save.rip);
+                       (void *)(unsigned long)svm->vmcb->save.rip);
 
        stgi();
 
-       kvm_reput_irq(vcpu);
+       kvm_reput_irq(svm);
 
-       vcpu->svm->next_rip = 0;
+       svm->next_rip = 0;
 
-       if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
+       if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                kvm_run->fail_entry.hardware_entry_failure_reason
-                       = vcpu->svm->vmcb->control.exit_code;
-               post_kvm_run_save(vcpu, kvm_run);
+                       = svm->vmcb->control.exit_code;
+               post_kvm_run_save(svm, kvm_run);
                return 0;
        }
 
-       r = handle_exit(vcpu, kvm_run);
+       r = handle_exit(svm, kvm_run);
        if (r > 0) {
-               if (signal_pending(current)) {
-                       ++vcpu->stat.signal_exits;
-                       post_kvm_run_save(vcpu, kvm_run);
-                       kvm_run->exit_reason = KVM_EXIT_INTR;
-                       return -EINTR;
-               }
-
-               if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+               if (dm_request_for_irq_injection(svm, kvm_run)) {
                        ++vcpu->stat.request_irq_exits;
-                       post_kvm_run_save(vcpu, kvm_run);
+                       post_kvm_run_save(svm, kvm_run);
                        kvm_run->exit_reason = KVM_EXIT_INTR;
                        return -EINTR;
                }
                kvm_resched(vcpu);
                goto again;
        }
-       post_kvm_run_save(vcpu, kvm_run);
+       post_kvm_run_save(svm, kvm_run);
        return r;
 }
 
-static void svm_flush_tlb(struct kvm_vcpu *vcpu)
-{
-       force_new_asid(vcpu);
-}
-
 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
-       vcpu->svm->vmcb->save.cr3 = root;
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       svm->vmcb->save.cr3 = root;
        force_new_asid(vcpu);
 
        if (vcpu->fpu_active) {
-               vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
-               vcpu->svm->vmcb->save.cr0 |= CR0_TS_MASK;
+               svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+               svm->vmcb->save.cr0 |= X86_CR0_TS;
                vcpu->fpu_active = 0;
        }
 }
@@ -1701,31 +1674,38 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
                                  unsigned long  addr,
                                  uint32_t err_code)
 {
-       uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
+       struct vcpu_svm *svm = to_svm(vcpu);
+       uint32_t exit_int_info = svm->vmcb->control.exit_int_info;
 
        ++vcpu->stat.pf_guest;
 
        if (is_page_fault(exit_int_info)) {
 
-               vcpu->svm->vmcb->control.event_inj_err = 0;
-               vcpu->svm->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
-                                                       SVM_EVTINJ_VALID_ERR |
-                                                       SVM_EVTINJ_TYPE_EXEPT |
-                                                       DF_VECTOR;
+               svm->vmcb->control.event_inj_err = 0;
+               svm->vmcb->control.event_inj =  SVM_EVTINJ_VALID |
+                                               SVM_EVTINJ_VALID_ERR |
+                                               SVM_EVTINJ_TYPE_EXEPT |
+                                               DF_VECTOR;
                return;
        }
        vcpu->cr2 = addr;
-       vcpu->svm->vmcb->save.cr2 = addr;
-       vcpu->svm->vmcb->control.event_inj =    SVM_EVTINJ_VALID |
-                                               SVM_EVTINJ_VALID_ERR |
-                                               SVM_EVTINJ_TYPE_EXEPT |
-                                               PF_VECTOR;
-       vcpu->svm->vmcb->control.event_inj_err = err_code;
+       svm->vmcb->save.cr2 = addr;
+       svm->vmcb->control.event_inj =  SVM_EVTINJ_VALID |
+                                       SVM_EVTINJ_VALID_ERR |
+                                       SVM_EVTINJ_TYPE_EXEPT |
+                                       PF_VECTOR;
+       svm->vmcb->control.event_inj_err = err_code;
 }
 
 
 static int is_disabled(void)
 {
+       u64 vm_cr;
+
+       rdmsrl(MSR_VM_CR, vm_cr);
+       if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
+               return 1;
+
        return 0;
 }
 
@@ -1741,11 +1721,17 @@ svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
        hypercall[3] = 0xc3;
 }
 
+static void svm_check_processor_compat(void *rtn)
+{
+       *(int *)rtn = 0;
+}
+
 static struct kvm_arch_ops svm_arch_ops = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
        .hardware_setup = svm_hardware_setup,
        .hardware_unsetup = svm_hardware_unsetup,
+       .check_processor_compatibility = svm_check_processor_compat,
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,
 
@@ -1787,13 +1773,13 @@ static struct kvm_arch_ops svm_arch_ops = {
 
        .run = svm_vcpu_run,
        .skip_emulated_instruction = skip_emulated_instruction,
-       .vcpu_setup = svm_vcpu_setup,
        .patch_hypercall = svm_patch_hypercall,
 };
 
 static int __init svm_init(void)
 {
-       return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
+       return kvm_init_arch(&svm_arch_ops, sizeof(struct vcpu_svm),
+                             THIS_MODULE);
 }
 
 static void __exit svm_exit(void)