 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
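/*
 * VCPU_GPRS_TM(reg) gives the byte offset of checkpointed GPR 'reg'
 * within the vcpu struct: the TM GPR save area starts at VCPU_GPR_TM
 * and holds one ULONG_SIZE slot per register, so for example
 * VCPU_GPRS_TM(29) is 29 * ULONG_SIZE bytes past VCPU_GPR_TM.
 */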
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
	ld	r4, HSTATE_KVM_VCPU(r13)
/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3
	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	stb	r0, HSTATE_HWTHREAD_REQ(r13)
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f		/* HMI check */
	/* RFI into the highmem handler, or branch to interrupt handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	beq	cr1, 13f		/* machine check */

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	b	hmi_exception_after_realmode
kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */

	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* order napping_threads update vs testing entry_exit_map */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit		/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0			/* Don't wake on privileged (OS) doorbell */
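	/*
	 * Note: this is essentially the same lock-free protocol as
	 * kvmppc_h_cede below: atomically set our bit in napping_threads,
	 * then re-check entry_exit_map, so that either an exiting thread
	 * sees our napping bit or we see its exit update and bail out.
	 */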
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)
	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */

	/* See if our timeslice has expired (HDEC is negative) */
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	beq	kvmppc_primary_no_guest
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time

	bl	kvmhv_commence_exit

	b	kvmhv_switch_to_host
/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
	/* Set runlatch bit the minute you wake up from nap */

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	/* if we have no vcore to run, go back to sleep */

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)
	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)

	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	ld	r0, KVM_SPLIT_RPR(r6)
	ld	r0, KVM_SPLIT_PMMAR(r6)
	ld	r0, KVM_SPLIT_LDBAR(r6)

	/* Order load of vcpu after load of vcore */
	ld	r4, HSTATE_KVM_VCPU(r13)
	/* Back from the guest, go back to nap */

	/* Clear our vcpu and vcore pointers so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that is true.
	 */
	std	r0, HSTATE_KVM_VCORE(r13)
	/*
	 * At this point we have finished executing in the guest.
	 * We need to wait for hwthread_req to become zero, since
	 * we may not turn on the MMU while hwthread_req is non-zero.
	 * While waiting we also need to check if we get given a vcpu to run.
	 */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	/*
	 * We jump to power7_wakeup_loss, which will return to the caller
	 * of power7_nap in the powernv cpu offline loop.  The value we
	 * put in r3 becomes the return value for power7_nap.
	 */
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/*
	 * Here the primary thread is trying to return the core to
	 * whole-core mode, so we need to nap.
	 */
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	/* clear any pending message */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED

	/* Check the do_nap flag again after setting napped[] */
	lbz	r0, KVM_SPLIT_DO_NAP(r3)

	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	rlwimi	r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)

	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
.global kvmppc_hv_entry

	/*
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRS = free
	 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	addi	r9, r5, VCORE_ENTRY_EXIT
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
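	/*
	 * entry_exit_map packs two per-thread bitmaps into one word:
	 * the entry map in the low byte and the exit map in the byte
	 * above it, so any value >= 0x100 means at least one thread has
	 * started to exit, hence the cmpwi against 0x100 above.
	 */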
	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
23:	ldarx	r7,0,r6			/* if set, clear the bit */
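	/*
	 * In C terms this is roughly
	 * test_and_clear_bit(cpu, kvm->arch.need_tlb_flush): the
	 * ldarx/stdcx. pair clears our bit atomically, so a flush
	 * request posted by another CPU cannot be lost.
	 */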
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
	li	r6,512			/* POWER8 has 512 sets */
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	li	r7,0x800		/* IS field = 0b10 */
	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
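	/*
	 * TBU40 writes only the upper 40 bits of the timebase, and the
	 * low 24 bits keep ticking through the update.  If they wrapped
	 * between the two mftb reads above, the carry into the upper
	 * bits was lost, so the addis adds 1 at bit 24 (0x100 << 16)
	 * and TBU40 is rewritten.
	 */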
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)

	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE
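	/*
	 * This loop walks vcpu->arch.slb[0..slb_max-1], stepping
	 * VCPU_SLB_SIZE bytes per entry, and inserts each saved
	 * ESID/VSID pair into the SLB (via slbmte).
	 */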
	/* Increment yield count if they have a VPA */
	li	r6, LPPACA_YIELDCOUNT
	stb	r6, VCPU_VPA_DIRTY(r4)

	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TEXASR, r7

	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm			/* TM not active in guest */

	/*
	 * Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */
	addi	r3, r31, VCPU_FPRS_TM
	addi	r3, r31, VCPU_VRS_TM
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR
	 * for too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13)	/* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	/* Load GPRs r0-r28 */
	ld	reg, VCPU_GPRS_TM(reg)(r31)

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	/* Now let's get back the state we need. */
	ld	r29, HSTATE_DSCR(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r5, VCPU_MMCR + 24(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Load up FP, VMX and VSX registers */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Switch DSCR to guest value */

	/* Skip next section on POWER7 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r5, VCPU_TCSCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */

	/* Restore state of CTRL run bit; assume 1 on entry */
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	lbz	r0, VCORE_IN_GUEST(r5)
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	lbz	r0, VCORE_IN_GUEST(r5)

	/* Check if HDEC expires soon */
	cmpwi	r3, 512		/* 1 microsecond */
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
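	/*
	 * The rldicl/rotldi pair above clears just MSR_HV without a
	 * scratch register: rotate left so the HV bit lands in the
	 * leftmost position, mask it off with rldicl, then rotate the
	 * remaining bits back into place.
	 */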
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	andi.	r8, r11, MSR_EE
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	li	r0, BOOK3S_INTERRUPT_DECREMENTER

12:	mtspr	SPRN_SRR0, r10
	bl	kvmppc_msr_interrupt
	/*
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 */
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time

	ld	r5, VCPU_CFAR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
11:	b	kvmhv_switch_to_host

	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
/******************************************************************************
 *                                                                            *
 *                               Interrupt handling                           *
 *                                                                            *
 *****************************************************************************/
/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr

	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)
	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
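	/*
	 * The first-level handlers flag interrupts delivered via
	 * HSRR0/1 by setting bit 1 of the vector number in r12
	 * (e.g. 0x502 for a hypervisor-routed external interrupt);
	 * the andi. below tests that bit so the PC and MSR are read
	 * from the right SRR pair.
	 */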
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)
	stw	r12,VCPU_TRAP(r9)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bge	fast_guest_return

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode
	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now */

	/* Check if any CPU is heading out to the host; if so, head out too */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	blt	deliver_guest_interrupt
guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time

	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	stw	r6,VCPU_CTRL(r9)
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	stw	r5,VCPU_SLB_MAX(r9)
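	/*
	 * Only entries with the valid (V) bit set are saved; r5 counts
	 * them and becomes vcpu->arch.slb_max, so the entry path above
	 * only has to reload the entries actually in use.
	 */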
	/*
	 * Save the guest PURR/SPURR
	 */
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	std	r5,VCPU_DEC_EXPIRES(r9)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */

	/* All GPRs are volatile at this point. */

	/* Temporarily store r13 and r9 so we have some regs to play with */
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)
	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)

	/* ... now save r13 */
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)
	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)

	/* Set MSR RI now we have r1 and r13 back. */

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	addi	r3, r9, VCPU_FPRS_TM
	addi	r3, r9, VCPU_VRS_TM
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	li	r4, LPPACA_YIELDCOUNT
	stb	r3, VCPU_VPA_DIRTY(r9)
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	mtspr	SPRN_MMCRA, r7
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current guest timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

17:	ld	r0, VCORE_PCR(r5)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */

16:	ld	r8,KVM_HOST_LPCR(r4)
	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	andis.	r7,r5,SLB_ESID_V@h
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmhv_accumulate_time

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI for the guest */
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	cmpldi	r3,hcall_real_table_end - hcall_real_table

	/* See if this hcall is enabled for in-kernel handling */
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
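	/*
	 * Together with the shift and mask that follow, this is roughly
	 * the real-mode form of
	 *	if (!test_bit(nr / 4, kvm->arch.enabled_hcalls))
	 *		fall back to the host;
	 * hcall numbers are multiples of 4, so nr / 4 is the bit index
	 * and (nr / 4) >> 6 selects the doubleword.
	 */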
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)

	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	/*
	 * We've attempted a real mode hcall, but the handler has punted
	 * it back to userspace.  We need to restore some clobbered
	 * volatiles before resuming the pass-it-to-qemu path.
	 */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	.globl	hcall_real_table
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:
_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li	r3, H_PARAMETER
_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 1, DAWRX_WT
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWRX, r5
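	/*
	 * The two rlwimi instructions above reposition the DABR's
	 * low-order control bits (the read/write enables and the BT
	 * bit) into the corresponding DAWRX fields, so an old-style
	 * H_SET_DABR behaves sensibly on a DAWR-only (POWER8) CPU.
	 */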
_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)
	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_map */
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */
	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)
	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */

	/* save expiry time of guest decrementer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */
	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
	mfspr	r0, SPRN_CTRLF
	mtspr	SPRN_CTRLT, r0

	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time

	/* load up FP state */

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	b	kvmppc_cede_reentry	/* if not go back to guest */
	/* the case where we cede but have already been prodded */
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	/* we've ceded but we want to give control to the host */
	ld	r9, HSTATE_KVM_VCPU(r13)
	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	cmpdi	r3, 0		/* Did we handle the MCE? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through
	 * a machine check interrupt (set HSRR0 to 0x200).  For handled
	 * (non-fatal) errors, just go back to guest execution with the
	 * current HSRR0 instead of exiting the guest.  This approach
	 * injects the machine check into the guest for fatal errors,
	 * causing the guest to crash.
	 *
	 * The old code used to return to the host for unhandled errors,
	 * which caused the guest to hang with soft lockups and made it
	 * difficult to recover the guest instance.
	 */
	ld	r11, VCPU_MSR(r9)
	bne	2f	/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	ld	r11, VCPU_MSR(r9)
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies r0, r6, r7, r8.
 */
kvmppc_check_wake_reason:
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
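	/*
	 * r6 now holds the SRR1 wake reason: 8 = external interrupt,
	 * 6 = decrementer, 5 = privileged doorbell, 3 = hypervisor
	 * doorbell, matching the compares below.
	 */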
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	cmpwi	r6, 6			/* was it the decrementer? */
	cmpwi	r6, 5			/* privileged doorbell? */
	cmpwi	r6, 3			/* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */
	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
	/* see if it's a host IPI */
	lbz	r0, HSTATE_HOST_IPI(r13)
	/* if not, clear it and return -1 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 * Modifies r0, r6, r7, r8, returns value in r3.
 */
	/* see if a host IPI is pending */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	/*
	 * Save XIRR for later.  Since we get it in reverse endian on LE
	 * systems, save it byte reversed and fetch it back in host endian.
	 */
	li	r3, HSTATE_SAVED_XIRR
#ifdef __LITTLE_ENDIAN__
	lwz	r3, HSTATE_SAVED_XIRR(r13)
	rlwinm.	r3, r3, 0, 0xffffff
	beq	1f			/* if nothing pending in the ICP */
	/*
	 * We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest.
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */

	/* It's an IPI, clear the MFRR and EOI it */
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
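	/*
	 * Writing 0xff to the MFRR retracts the IPI request, and
	 * writing the saved XIRR value back to the XIRR register EOIs
	 * the interrupt, restoring the ICP to its previous priority.
	 */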
	/*
	 * We need to re-check host IPI now in case it got set in the
	 * meantime.  If it's clear, we bounce the interrupt to the
	 * guest.
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* OK, it's an IPI for us */

42:	/*
	 * It's not an IPI and it's for the host.  We saved a copy of
	 * XIRR in the PACA earlier; it will be picked up by the host
	 * ICP driver.
	 */

43:	/* We raced with the host, we need to resend that IPI, bummer */
	stbcix	r0, r6, r8		/* set the IPI */
/*
 * Save away FP, VMX and VSX registers.
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
/*
 * Load up FP, VMX and VSX registers
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *   r11 has the guest MSR value (in/out)
 *   r9 has a vcpu pointer (in)
 *   r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2			/* Check if we are in transactional state.. */
	ld	r11, VCPU_INTR_MSR(r9)
	/* ... if transactional, change to suspended */
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
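/*
 * In C terms (sketch): ts = (guest_msr >> MSR_TS_S_LG) & 3;
 * new_msr = vcpu->arch.intr_msr; if ts was 2 (transactional),
 * deliver with ts = 1 (suspended), since an interrupt moves a
 * transactional thread to suspended state; otherwise re-insert
 * the original ts field unchanged.
 */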
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
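	/*
	 * MMCR0[PMCjCE] enables an alert when a counter goes negative,
	 * and MMCR0[C56RUN] lets PMC5/6 count regardless of the run
	 * latch, so a counter seeded just below 0x80000000 overflows,
	 * and thus raises the pending alert, almost immediately.
	 */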
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	beq	5f			/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	beq	4f			/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r7, VCPU_ACTIVITY_START(r4)
	ld	r8, TAS_SEQCOUNT(r5)
	std	r8, TAS_SEQCOUNT(r5)
	ld	r7, TAS_TOTAL(r5)
	std	r7, TAS_TOTAL(r5)
3:	std	r3, TAS_MIN(r5)
	std	r8, TAS_SEQCOUNT(r5)
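/*
 * The TAS_SEQCOUNT updates bracket the statistics update in seqlock
 * style: the count is bumped to an odd value before the fields are
 * modified and made even again afterwards, so a reader that sees an
 * odd or changed count knows to retry.
 */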