
Merge remote-tracking branch 'kvm/master' into HEAD
author     Paolo Bonzini <pbonzini@redhat.com>
           Tue, 21 Dec 2021 17:51:09 +0000 (12:51 -0500)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Tue, 21 Dec 2021 17:51:09 +0000 (12:51 -0500)
Pick commit fdba608f15e2 ("KVM: VMX: Wake vCPU when delivering posted
IRQ even if vCPU == this vCPU").  In addition to fixing a bug, it
also aligns the non-nested and nested paths for triggering posted
interrupts, allowing for additional cleanups.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
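
For orientation, here is a rough userspace model of the control-flow
change in the picked commit (toy types and stand-in names, not the
kernel code): the old path skipped the notification entirely when the
target was the currently running vCPU, which could lose a wakeup for a
blocking vCPU; the new path always attempts the trigger and falls back
to a kick.

#include <stdbool.h>
#include <stdio.h>

struct vcpu { int id; bool in_guest_mode; };

static bool trigger_posted_interrupt(struct vcpu *v)
{
	/* The notification IPI only does anything for a vCPU in guest mode. */
	return v->in_guest_mode;
}

static void kick(struct vcpu *v)
{
	printf("vCPU%d: woken/kicked to notice PID.ON\n", v->id);
}

/* Old flow: self-delivery sent no notification at all, so a vCPU that
 * was blocking could miss its wakeup. */
static void deliver_old(struct vcpu *target, struct vcpu *self)
{
	if (target != self && !trigger_posted_interrupt(target))
		kick(target);
}

/* New flow, after the picked commit: always attempt the trigger and
 * fall back to a kick, whichever vCPU is delivering. */
static void deliver_new(struct vcpu *target)
{
	if (!trigger_posted_interrupt(target))
		kick(target);
}

int main(void)
{
	struct vcpu v = { .id = 0, .in_guest_mode = false };

	deliver_old(&v, &v);	/* silent: the wakeup is lost */
	deliver_new(&v);	/* falls back to a kick       */
	return 0;
}

Dropping the running-vCPU special case is also what lets the nested
and non-nested paths share the same trigger logic, per the message
above.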
Documentation/admin-guide/kernel-parameters.txt
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/hyperv.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

Simple merge (no conflict hunks) for the remaining files; the combined
diffs below cover arch/x86/kvm/svm/svm.c, arch/x86/kvm/vmx/vmx.c and
arch/x86/kvm/x86.c.
@@@ -1594,18 -1585,21 +1594,27 @@@ static void svm_set_rflags(struct kvm_v
        to_svm(vcpu)->vmcb->save.rflags = rflags;
  }
  
+ static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
+ {
+       struct vmcb *vmcb = to_svm(vcpu)->vmcb;
+
+       return sev_es_guest(vcpu->kvm)
+               ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
+               : kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
+ }
+
  static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
  {
 +      kvm_register_mark_available(vcpu, reg);
 +
        switch (reg) {
        case VCPU_EXREG_PDPTR:
 -              BUG_ON(!npt_enabled);
 -              load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 +              /*
 +               * When !npt_enabled, mmu->pdptrs[] is already available:
 +               * per the SDM, it is updated on every move to a control
 +               * register.
 +               */
 +              if (npt_enabled)
 +                      load_pdptrs(vcpu, kvm_read_cr3(vcpu));
                break;
        default:
                KVM_BUG_ON(1, vcpu->kvm);
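
An aside on the new svm_get_if_flag() above (a hedged userspace model,
not the merged code): SEV-ES guests keep their register state,
including RFLAGS, encrypted and unreadable by the hypervisor, so the
interrupt-enable state must come from the int_state field that hardware
maintains in the VMCB. The bit position below is illustrative; the real
definition lives in arch/x86/include/asm/svm.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_IF		(1u << 9)
#define GUEST_INTERRUPT_MASK	(1u << 8)	/* illustrative bit position */

struct toy_vcpu {
	bool sev_es;		/* register state encrypted?          */
	uint32_t rflags;	/* trustworthy only when !sev_es      */
	uint32_t int_state;	/* maintained by hardware in the VMCB */
};

/* Mirrors the shape of svm_get_if_flag(): SEV-ES guests hide RFLAGS,
 * so the interrupt mask the CPU writes back to the VMCB is used. */
static bool toy_get_if_flag(struct toy_vcpu *v)
{
	return v->sev_es ? (v->int_state & GUEST_INTERRUPT_MASK)
			 : (v->rflags & X86_EFLAGS_IF);
}

int main(void)
{
	struct toy_vcpu plain = { .rflags = X86_EFLAGS_IF };
	struct toy_vcpu es = { .sev_es = true, .int_state = GUEST_INTERRUPT_MASK };

	printf("plain: %d, sev-es: %d\n",
	       toy_get_if_flag(&plain), toy_get_if_flag(&es));
	return 0;
}
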
@@@ -3989,14 -3964,7 +3994,13 @@@ static int vmx_deliver_posted_interrupt
        if (pi_test_and_set_on(&vmx->pi_desc))
                return 0;
  
-       if (vcpu != kvm_get_running_vcpu() &&
-           !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
 +      /*
 +       * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
 +       * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
 +       * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
 +       * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
 +       */
+       if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
                kvm_vcpu_kick(vcpu);
  
        return 0;
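
The barrier pairing described in the comment above is the classic
store-buffering pattern. A compilable userspace analogue using C11
atomics (a sketch under assumed names; the kernel uses smp_mb()-family
primitives, not these fences):

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool pid_on;	/* stands for PID.ON                   */
static atomic_bool in_guest;	/* stands for vcpu->mode == IN_GUEST_MODE */

/* Sender, modeling vmx_deliver_posted_interrupt(). */
static bool sender(void)
{
	/* An atomic RMW is a full barrier, like pi_test_and_set_on():
	 * the PID.ON store is ordered before the mode check below. */
	atomic_exchange(&pid_on, true);
	return atomic_load(&in_guest);	/* false: "failed", caller kicks */
}

/* Target vCPU, modeling vcpu_enter_guest() just before VM-entry. */
static bool vcpu_sees_pending(void)
{
	atomic_store(&in_guest, true);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb_*() */
	return atomic_load(&pid_on);	/* syncs PIR to IRR when true */
}

static void *sender_thread(void *ret)
{
	*(bool *)ret = sender();
	return NULL;
}

static void *vcpu_thread(void *ret)
{
	*(bool *)ret = vcpu_sees_pending();
	return NULL;
}

int main(void)
{
	bool ipi_hit = false, pir_seen = false;
	pthread_t s, v;

	pthread_create(&s, NULL, sender_thread, &ipi_hit);
	pthread_create(&v, NULL, vcpu_thread, &pir_seen);
	pthread_join(s, NULL);
	pthread_join(v, NULL);

	/* Store-buffering guarantee: at least one side sees the other's
	 * store, so the posted interrupt is never lost. */
	assert(ipi_hit || pir_seen);
	return 0;
}

With full barriers on both sides it is impossible for the sender to
miss IN_GUEST_MODE *and* for the vCPU to miss PID.ON=1, which is
exactly the guarantee the comment relies on.
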
@@@ -902,11 -886,12 +902,12 @@@ int kvm_set_cr0(struct kvm_vcpu *vcpu, 
        }
  #endif
        if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
 -          is_pae(vcpu) && ((cr0 ^ old_cr0) & pdptr_bits) &&
 -          !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
 +          is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
 +          !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
                return 1;
  
-       if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+       if (!(cr0 & X86_CR0_PG) &&
+           (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
                return 1;
  
        static_call(kvm_x86_set_cr0)(vcpu, cr0);
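
Finally, a toy model of the tightened check above (constants per the
SDM bit layout; the helper name is made up): clearing CR0.PG must be
rejected both from 64-bit mode and while CR4.PCIDE is set, since long
mode and PCIDs both require paging.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define X86_CR0_PG	(1u << 31)
#define X86_CR4_PCIDE	(1u << 17)

/* Toy version of the guard above: may this MOV-to-CR0 value be accepted? */
static bool cr0_write_ok(uint32_t new_cr0, uint32_t cr4, bool in_64_bit_mode)
{
	/* Clearing CR0.PG is rejected from 64-bit mode or with PCIDs on. */
	if (!(new_cr0 & X86_CR0_PG) &&
	    (in_64_bit_mode || (cr4 & X86_CR4_PCIDE)))
		return false;
	return true;
}

int main(void)
{
	assert(!cr0_write_ok(0, X86_CR4_PCIDE, false));	/* PG=0, PCIDE=1       */
	assert(!cr0_write_ok(0, 0, true));		/* PG=0 in 64-bit mode */
	assert(cr0_write_ok(X86_CR0_PG, X86_CR4_PCIDE, true));
	return 0;
}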