OSDN Git Service

KVM: PPC: Move nip/ctr/lr/xer registers to pt_regs in kvm_vcpu_arch
author: Simon Guo <wei.guo.simon@gmail.com>
Mon, 7 May 2018 06:20:08 +0000 (14:20 +0800)
committer: Paul Mackerras <paulus@ozlabs.org>
Fri, 18 May 2018 05:38:23 +0000 (15:38 +1000)
This patch moves nip/ctr/lr/xer registers from scattered places in
kvm_vcpu_arch to pt_regs structure.

The cr register is "unsigned long" in pt_regs but u32 in vcpu->arch.
It will need more consideration and may be moved in a later patch.

Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
14 files changed:
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/include/asm/kvm_booke.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kvm/book3s_32_mmu.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_tm.c
arch/powerpc/kvm/book3s_hv_tm_builtin.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/booke_emulate.c
arch/powerpc/kvm/e500_emulate.c
arch/powerpc/kvm/e500_mmu.c

index e3182f7..20d3d5a 100644 (file)
@@ -295,42 +295,42 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
-       vcpu->arch.xer = val;
+       vcpu->arch.regs.xer = val;
 }
 
 static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.xer;
+       return vcpu->arch.regs.xer;
 }
 
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
-       vcpu->arch.ctr = val;
+       vcpu->arch.regs.ctr = val;
 }
 
 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.ctr;
+       return vcpu->arch.regs.ctr;
 }
 
 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
 {
-       vcpu->arch.lr = val;
+       vcpu->arch.regs.link = val;
 }
 
 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.lr;
+       return vcpu->arch.regs.link;
 }
 
 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
 {
-       vcpu->arch.pc = val;
+       vcpu->arch.regs.nip = val;
 }
 
 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.pc;
+       return vcpu->arch.regs.nip;
 }
 
 static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
index 38dbcad..dc435a5 100644 (file)
@@ -483,9 +483,9 @@ static inline u64 sanitize_msr(u64 msr)
 static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.cr  = vcpu->arch.cr_tm;
-       vcpu->arch.xer = vcpu->arch.xer_tm;
-       vcpu->arch.lr  = vcpu->arch.lr_tm;
-       vcpu->arch.ctr = vcpu->arch.ctr_tm;
+       vcpu->arch.regs.xer = vcpu->arch.xer_tm;
+       vcpu->arch.regs.link  = vcpu->arch.lr_tm;
+       vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
        vcpu->arch.amr = vcpu->arch.amr_tm;
        vcpu->arch.ppr = vcpu->arch.ppr_tm;
        vcpu->arch.dscr = vcpu->arch.dscr_tm;
@@ -500,9 +500,9 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.cr_tm  = vcpu->arch.cr;
-       vcpu->arch.xer_tm = vcpu->arch.xer;
-       vcpu->arch.lr_tm  = vcpu->arch.lr;
-       vcpu->arch.ctr_tm = vcpu->arch.ctr;
+       vcpu->arch.xer_tm = vcpu->arch.regs.xer;
+       vcpu->arch.lr_tm  = vcpu->arch.regs.link;
+       vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
        vcpu->arch.amr_tm = vcpu->arch.amr;
        vcpu->arch.ppr_tm = vcpu->arch.ppr;
        vcpu->arch.dscr_tm = vcpu->arch.dscr;
index f5fc956..d513e3e 100644 (file)
@@ -56,12 +56,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
-       vcpu->arch.xer = val;
+       vcpu->arch.regs.xer = val;
 }
 
 static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.xer;
+       return vcpu->arch.regs.xer;
 }
 
 static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
@@ -72,32 +72,32 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
-       vcpu->arch.ctr = val;
+       vcpu->arch.regs.ctr = val;
 }
 
 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.ctr;
+       return vcpu->arch.regs.ctr;
 }
 
 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
 {
-       vcpu->arch.lr = val;
+       vcpu->arch.regs.link = val;
 }
 
 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.lr;
+       return vcpu->arch.regs.link;
 }
 
 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
 {
-       vcpu->arch.pc = val;
+       vcpu->arch.regs.nip = val;
 }
 
 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.pc;
+       return vcpu->arch.regs.nip;
 }
 
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
index a75443a..8b0ee5e 100644 (file)
@@ -521,14 +521,10 @@ struct kvm_vcpu_arch {
        u32 qpr[32];
 #endif
 
-       ulong pc;
-       ulong ctr;
-       ulong lr;
 #ifdef CONFIG_PPC_BOOK3S
        ulong tar;
 #endif
 
-       ulong xer;
        u32 cr;
 
 #ifdef CONFIG_PPC_BOOK3S
index 774c6a8..70a345c 100644 (file)
@@ -431,14 +431,14 @@ int main(void)
 #ifdef CONFIG_ALTIVEC
        OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr);
 #endif
-       OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
-       OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
-       OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
+       OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
+       OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
+       OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
 #ifdef CONFIG_PPC_BOOK3S
        OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
 #endif
        OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
-       OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
+       OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
        OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0);
@@ -695,10 +695,10 @@ int main(void)
 
 #else /* CONFIG_PPC_BOOK3S */
        OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
-       OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
-       OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
-       OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
-       OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
+       OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
+       OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
+       OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
+       OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
        OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9);
        OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
        OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear);
index 1992676..45c8ea4 100644 (file)
@@ -52,7 +52,7 @@
 static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
 {
 #ifdef DEBUG_MMU_PTE_IP
-       return vcpu->arch.pc == DEBUG_MMU_PTE_IP;
+       return vcpu->arch.regs.nip == DEBUG_MMU_PTE_IP;
 #else
        return true;
 #endif
index f61dd9e..336e346 100644 (file)
@@ -397,13 +397,13 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 
        pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
        pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
-              vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
+              vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
        for (r = 0; r < 16; ++r)
                pr_err("r%2d = %.16lx  r%d = %.16lx\n",
                       r, kvmppc_get_gpr(vcpu, r),
                       r+16, kvmppc_get_gpr(vcpu, r+16));
        pr_err("ctr = %.16lx  lr  = %.16lx\n",
-              vcpu->arch.ctr, vcpu->arch.lr);
+              vcpu->arch.regs.ctr, vcpu->arch.regs.link);
        pr_err("srr0 = %.16llx srr1 = %.16llx\n",
               vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
        pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
@@ -411,7 +411,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
        pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
               vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
        pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
-              vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
+              vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
        pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
        pr_err("fault dar = %.16lx dsisr = %.8x\n",
               vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
index bf710ad..0082850 100644 (file)
@@ -19,7 +19,7 @@ static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
        u64 texasr, tfiar;
        u64 msr = vcpu->arch.shregs.msr;
 
-       tfiar = vcpu->arch.pc & ~0x3ull;
+       tfiar = vcpu->arch.regs.nip & ~0x3ull;
        texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
        if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
                texasr |= TEXASR_SUSP;
@@ -57,8 +57,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                               (newmsr & MSR_TM)));
                newmsr = sanitize_msr(newmsr);
                vcpu->arch.shregs.msr = newmsr;
-               vcpu->arch.cfar = vcpu->arch.pc - 4;
-               vcpu->arch.pc = vcpu->arch.shregs.srr0;
+               vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+               vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
                return RESUME_GUEST;
 
        case PPC_INST_RFEBB:
@@ -90,8 +90,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
                vcpu->arch.bescr = bescr;
                msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
                vcpu->arch.shregs.msr = msr;
-               vcpu->arch.cfar = vcpu->arch.pc - 4;
-               vcpu->arch.pc = vcpu->arch.ebbrr;
+               vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+               vcpu->arch.regs.nip = vcpu->arch.ebbrr;
                return RESUME_GUEST;
 
        case PPC_INST_MTMSRD:
index d98ccfd..b2c7c6f 100644 (file)
@@ -35,8 +35,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
                        return 0;
                newmsr = sanitize_msr(newmsr);
                vcpu->arch.shregs.msr = newmsr;
-               vcpu->arch.cfar = vcpu->arch.pc - 4;
-               vcpu->arch.pc = vcpu->arch.shregs.srr0;
+               vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+               vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
                return 1;
 
        case PPC_INST_RFEBB:
@@ -58,8 +58,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
                mtspr(SPRN_BESCR, bescr);
                msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
                vcpu->arch.shregs.msr = msr;
-               vcpu->arch.cfar = vcpu->arch.pc - 4;
-               vcpu->arch.pc = mfspr(SPRN_EBBRR);
+               vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+               vcpu->arch.regs.nip = mfspr(SPRN_EBBRR);
                return 1;
 
        case PPC_INST_MTMSRD:
@@ -103,7 +103,7 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
 void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.shregs.msr &= ~MSR_TS_MASK;  /* go to N state */
-       vcpu->arch.pc = vcpu->arch.tfhar;
+       vcpu->arch.regs.nip = vcpu->arch.tfhar;
        copy_from_checkpoint(vcpu);
        vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
 }
index 899bc9a..67061d3 100644 (file)
@@ -162,10 +162,10 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
        svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
        svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
        svcpu->cr  = vcpu->arch.cr;
-       svcpu->xer = vcpu->arch.xer;
-       svcpu->ctr = vcpu->arch.ctr;
-       svcpu->lr  = vcpu->arch.lr;
-       svcpu->pc  = vcpu->arch.pc;
+       svcpu->xer = vcpu->arch.regs.xer;
+       svcpu->ctr = vcpu->arch.regs.ctr;
+       svcpu->lr  = vcpu->arch.regs.link;
+       svcpu->pc  = vcpu->arch.regs.nip;
 #ifdef CONFIG_PPC_BOOK3S_64
        svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
 #endif
@@ -209,10 +209,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
        vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
        vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
        vcpu->arch.cr  = svcpu->cr;
-       vcpu->arch.xer = svcpu->xer;
-       vcpu->arch.ctr = svcpu->ctr;
-       vcpu->arch.lr  = svcpu->lr;
-       vcpu->arch.pc  = svcpu->pc;
+       vcpu->arch.regs.xer = svcpu->xer;
+       vcpu->arch.regs.ctr = svcpu->ctr;
+       vcpu->arch.regs.link  = svcpu->lr;
+       vcpu->arch.regs.nip  = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar   = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
index 876d4f2..a9ca016 100644 (file)
@@ -77,8 +77,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 {
        int i;
 
-       printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
-       printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
+       printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.regs.nip,
+                       vcpu->arch.shared->msr);
+       printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.regs.link,
+                       vcpu->arch.regs.ctr);
        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
                                            vcpu->arch.shared->srr1);
 
@@ -491,24 +493,25 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
        if (allowed) {
                switch (int_class) {
                case INT_CLASS_NONCRIT:
-                       set_guest_srr(vcpu, vcpu->arch.pc,
+                       set_guest_srr(vcpu, vcpu->arch.regs.nip,
                                      vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_CRIT:
-                       set_guest_csrr(vcpu, vcpu->arch.pc,
+                       set_guest_csrr(vcpu, vcpu->arch.regs.nip,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_DBG:
-                       set_guest_dsrr(vcpu, vcpu->arch.pc,
+                       set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_MC:
-                       set_guest_mcsrr(vcpu, vcpu->arch.pc,
+                       set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
                                        vcpu->arch.shared->msr);
                        break;
                }
 
-               vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
+               vcpu->arch.regs.nip = vcpu->arch.ivpr |
+                                       vcpu->arch.ivor[priority];
                if (update_esr == true)
                        kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
                if (update_dear == true)
@@ -826,7 +829,7 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
        case EMULATE_FAIL:
                printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
-                      __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+                      __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
                /* For debugging, encode the failing instruction and
                 * report it to userspace. */
                run->hw.hardware_exit_reason = ~0ULL << 32;
@@ -875,7 +878,7 @@ static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
         */
        vcpu->arch.dbsr = 0;
        run->debug.arch.status = 0;
-       run->debug.arch.address = vcpu->arch.pc;
+       run->debug.arch.address = vcpu->arch.regs.nip;
 
        if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
                run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
@@ -971,7 +974,7 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
        case EMULATE_FAIL:
                pr_debug("%s: load instruction from guest address %lx failed\n",
-                      __func__, vcpu->arch.pc);
+                      __func__, vcpu->arch.regs.nip);
                /* For debugging, encode the failing instruction and
                 * report it to userspace. */
                run->hw.hardware_exit_reason = ~0ULL << 32;
@@ -1169,7 +1172,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
        case BOOKE_INTERRUPT_SPE_FP_DATA:
        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
-                      __func__, exit_nr, vcpu->arch.pc);
+                      __func__, exit_nr, vcpu->arch.regs.nip);
                run->hw.hardware_exit_reason = exit_nr;
                r = RESUME_HOST;
                break;
@@ -1299,7 +1302,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
        }
 
        case BOOKE_INTERRUPT_ITLB_MISS: {
-               unsigned long eaddr = vcpu->arch.pc;
+               unsigned long eaddr = vcpu->arch.regs.nip;
                gpa_t gpaddr;
                gfn_t gfn;
                int gtlb_index;
@@ -1391,7 +1394,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
        int i;
        int r;
 
-       vcpu->arch.pc = 0;
+       vcpu->arch.regs.nip = 0;
        vcpu->arch.shared->pir = vcpu->vcpu_id;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
        kvmppc_set_msr(vcpu, 0);
@@ -1440,10 +1443,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
        vcpu_load(vcpu);
 
-       regs->pc = vcpu->arch.pc;
+       regs->pc = vcpu->arch.regs.nip;
        regs->cr = kvmppc_get_cr(vcpu);
-       regs->ctr = vcpu->arch.ctr;
-       regs->lr = vcpu->arch.lr;
+       regs->ctr = vcpu->arch.regs.ctr;
+       regs->lr = vcpu->arch.regs.link;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = kvmppc_get_srr0(vcpu);
@@ -1471,10 +1474,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
        vcpu_load(vcpu);
 
-       vcpu->arch.pc = regs->pc;
+       vcpu->arch.regs.nip = regs->pc;
        kvmppc_set_cr(vcpu, regs->cr);
-       vcpu->arch.ctr = regs->ctr;
-       vcpu->arch.lr = regs->lr;
+       vcpu->arch.regs.ctr = regs->ctr;
+       vcpu->arch.regs.link = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        kvmppc_set_srr0(vcpu, regs->srr0);
index a82f645..d23e582 100644 (file)
 
 static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.pc = vcpu->arch.shared->srr0;
+       vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
        kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
 }
 
 static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.pc = vcpu->arch.dsrr0;
+       vcpu->arch.regs.nip = vcpu->arch.dsrr0;
        kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
 }
 
 static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.pc = vcpu->arch.csrr0;
+       vcpu->arch.regs.nip = vcpu->arch.csrr0;
        kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
 }
 
index 8f871fb..3f8189e 100644 (file)
@@ -94,7 +94,7 @@ static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
        switch (get_oc(inst)) {
        case EHPRIV_OC_DEBUG:
                run->exit_reason = KVM_EXIT_DEBUG;
-               run->debug.arch.address = vcpu->arch.pc;
+               run->debug.arch.address = vcpu->arch.regs.nip;
                run->debug.arch.status = 0;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                emulated = EMULATE_EXIT_USER;
index ddbf8f0..24296f4 100644 (file)
@@ -513,7 +513,7 @@ void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
 {
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
-       kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
+       kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as);
 }
 
 void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)