
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 9 Apr 2018 18:42:31 +0000 (11:42 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 9 Apr 2018 18:42:31 +0000 (11:42 -0700)
Pull kvm updates from Paolo Bonzini:
 "ARM:
   - VHE optimizations

   - EL2 address space randomization

   - speculative execution mitigations ("variant 3a", aka execution past
     invalid privilege register access)

   - bugfixes and cleanups

  PPC:
   - improvements for the radix page fault handler for HV KVM on POWER9

  s390:
   - more kvm stat counters

   - virtio gpu plumbing

   - documentation

   - facilities improvements

  x86:
   - support for VMware magic I/O port and pseudo-PMCs

   - AMD pause loop exiting

   - support for AMD core performance extensions

   - support for synchronous register access (see the usage sketch after
     this log message)

   - expose nVMX capabilities to userspace

   - support for Hyper-V signaling via eventfd

   - use Enlightened VMCS when running on Hyper-V

   - allow userspace to disable MWAIT/HLT/PAUSE vmexits (see the sketch
     after the commit list below)

   - usual roundup of optimizations and nested virtualization bugfixes

  Generic:
   - API selftest infrastructure (though the only tests are for x86 as
     of now)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (174 commits)
  kvm: x86: fix a prototype warning
  kvm: selftests: add sync_regs_test
  kvm: selftests: add API testing infrastructure
  kvm: x86: fix a compile warning
  KVM: X86: Add Force Emulation Prefix for "emulate the next instruction"
  KVM: X86: Introduce handle_ud()
  KVM: vmx: unify adjacent #ifdefs
  x86: kvm: hide the unused 'cpu' variable
  KVM: VMX: remove bogus WARN_ON in handle_ept_misconfig
  Revert "KVM: X86: Fix SMRAM accessing even if VM is shutdown"
  kvm: Add emulation for movups/movupd
  KVM: VMX: raise internal error for exception during invalid protected mode state
  KVM: nVMX: Optimization: Dont set KVM_REQ_EVENT when VMExit with nested_run_pending
  KVM: nVMX: Require immediate-exit when event reinjected to L2 and L1 event pending
  KVM: x86: Fix misleading comments on handling pending exceptions
  KVM: x86: Rename interrupt.pending to interrupt.injected
  KVM: VMX: No need to clear pending NMI/interrupt on inject realmode interrupt
  x86/kvm: use Enlightened VMCS when running on Hyper-V
  x86/hyper-v: detect nested features
  x86/hyper-v: define struct hv_enlightened_vmcs and clean field bits
  ...
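
Similarly, a hedged sketch of the MWAIT/HLT/PAUSE vmexit opt-out mentioned
in the log (KVM_CAP_X86_DISABLE_EXITS). The capability is enabled on the VM
fd and must be set before any vCPU is created; error handling is elided and
the function name is illustrative:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Let the guest execute MWAIT/HLT/PAUSE natively instead of exiting,
 * as one might on a host with dedicated physical CPUs per vCPU. */
int disable_idle_exits(int vm_fd)
{
        struct kvm_enable_cap cap;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_X86_DISABLE_EXITS;
        cap.args[0] = KVM_X86_DISABLE_EXITS_MWAIT |
                      KVM_X86_DISABLE_EXITS_HLT |
                      KVM_X86_DISABLE_EXITS_PAUSE;

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}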

25 files changed:
Documentation/admin-guide/kernel-parameters.txt
MAINTAINERS
arch/arm64/Kconfig
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_hv.c
arch/s390/include/asm/mmu_context.h
arch/x86/include/asm/hyperv-tlfs.h
arch/x86/include/asm/mshyperv.h
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/kvm.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
drivers/hv/hv.c
drivers/hv/hyperv_vmbus.h
include/linux/hyperv.h
tools/include/uapi/linux/kvm.h
tools/testing/selftests/Makefile

diff --cc MAINTAINERS
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -76,8 -57,11 +76,10 @@@ cpu_enable_trap_ctr_access(const struc
  {
        /* Clear SCTLR_EL1.UCT */
        config_sctlr_el1(SCTLR_EL1_UCT, 0);
 -      return 0;
  }
  
+ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
  #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
  #include <asm/mmu_context.h>
  #include <asm/cacheflush.h>
@@@ -243,111 -225,26 +242,115 @@@ qcom_enable_link_stack_sanitization(con
  }
  #endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
  
 -#define MIDR_RANGE(model, min, max) \
 -      .def_scope = SCOPE_LOCAL_CPU, \
 -      .matches = is_affected_midr_range, \
 -      .midr_model = model, \
 -      .midr_range_min = min, \
 -      .midr_range_max = max
 +#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)     \
 +      .matches = is_affected_midr_range,                      \
 +      .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
 +
 +#define CAP_MIDR_ALL_VERSIONS(model)                                  \
 +      .matches = is_affected_midr_range,                              \
 +      .midr_range = MIDR_ALL_VERSIONS(model)
 +
 +#define MIDR_FIXED(rev, revidr_mask) \
 +      .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
 +
 +#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)          \
 +      .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
 +      CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
 +
 +#define CAP_MIDR_RANGE_LIST(list)                             \
 +      .matches = is_affected_midr_range_list,                 \
 +      .midr_range_list = list
 +
 +/* Errata affecting a range of revisions of a given model variant */
 +#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)    \
 +      ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
 +
 +/* Errata affecting a single variant/revision of a model */
 +#define ERRATA_MIDR_REV(model, var, rev)      \
 +      ERRATA_MIDR_RANGE(model, var, rev, var, rev)
 +
 +/* Errata affecting all variants/revisions of a given model */
 +#define ERRATA_MIDR_ALL_VERSIONS(model)                               \
 +      .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
 +      CAP_MIDR_ALL_VERSIONS(model)
 +
 +/* Errata affecting a list of midr ranges, with the same workaround */
 +#define ERRATA_MIDR_RANGE_LIST(midr_list)                     \
 +      .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
 +      CAP_MIDR_RANGE_LIST(midr_list)
 +
 +/*
 + * Generic helper for handling capabilities with multiple (match, enable)
 + * pairs of callbacks, sharing the same capability bit.
 + * Iterate over each entry to see if at least one matches.
 + */
 +static bool __maybe_unused
 +multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
 +{
 +      const struct arm64_cpu_capabilities *caps;
 +
 +      for (caps = entry->match_list; caps->matches; caps++)
 +              if (caps->matches(caps, scope))
 +                      return true;
 +
 +      return false;
 +}
 +
 +/*
 + * Take appropriate action for all matching entries in the shared capability
 + * entry.
 + */
 +static void __maybe_unused
 +multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
 +{
 +      const struct arm64_cpu_capabilities *caps;
  
 -#define MIDR_ALL_VERSIONS(model) \
 -      .def_scope = SCOPE_LOCAL_CPU, \
 -      .matches = is_affected_midr_range, \
 -      .midr_model = model, \
 -      .midr_range_min = 0, \
 -      .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
 +      for (caps = entry->match_list; caps->matches; caps++)
 +              if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
 +                  caps->cpu_enable)
 +                      caps->cpu_enable(caps);
 +}
 +
 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 +
 +/*
 + * List of CPUs where we need to issue a psci call to
 + * harden the branch predictor.
 + */
 +static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
 +      MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
 +      MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
 +      MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 +      MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
 +      MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 +      MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
 +      {},
 +};
 +
 +static const struct midr_range qcom_bp_harden_cpus[] = {
 +      MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
 +      MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
 +      {},
 +};
 +
 +static const struct arm64_cpu_capabilities arm64_bp_harden_list[] = {
 +      {
 +              CAP_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
 +              .cpu_enable = enable_smccc_arch_workaround_1,
 +      },
 +      {
 +              CAP_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
 +              .cpu_enable = qcom_enable_link_stack_sanitization,
 +      },
 +      {},
 +};
 +
 +#endif
  
+ #ifndef ERRATA_MIDR_ALL_VERSIONS
+ #define       ERRATA_MIDR_ALL_VERSIONS(x)     MIDR_ALL_VERSIONS(x)
+ #endif
  const struct arm64_cpu_capabilities arm64_errata[] = {
  #if   defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
        },
        {
                .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
 -              MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
 -      },
 -      {
 -              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -              MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
 -              .enable = qcom_enable_link_stack_sanitization,
 -      },
 -      {
 -              .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
 -              MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
 -      },
 -      {
 -              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -              MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 -              .enable = enable_smccc_arch_workaround_1,
 -      },
 -      {
 -              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -              MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
 -              .enable = enable_smccc_arch_workaround_1,
 +              ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
        },
  #endif
+ #ifdef CONFIG_HARDEN_EL2_VECTORS
+       {
+               .desc = "Cortex-A57 EL2 vector hardening",
+               .capability = ARM64_HARDEN_EL2_VECTORS,
+               ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+       },
+       {
+               .desc = "Cortex-A72 EL2 vector hardening",
+               .capability = ARM64_HARDEN_EL2_VECTORS,
+               ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+       },
+ #endif
        {
        }
  };
@@@ -838,19 -826,11 +838,6 @@@ static bool has_no_hw_prefetch(const st
                MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
  }
  
- static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
-                          int __unused)
 -static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
--{
-       phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
-       /*
-        * Activate the lower HYP offset only if:
-        * - the idmap doesn't clash with it,
-        * - the kernel is not running at EL2.
-        */
-       return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
 -      return is_kernel_in_hyp_mode();
--}
--
  static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
  {
        u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -546,7 -553,8 +553,8 @@@ static void __init kvm_guest_init(void
        }
  
        if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
 -          !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
+           !kvm_para_has_hint(KVM_HINTS_DEDICATED) &&
 +          kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
                pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
  
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@@ -635,7 -649,8 +649,8 @@@ static __init int kvm_setup_pv_tlb_flus
        int cpu;
  
        if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
 -          !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+           !kvm_para_has_hint(KVM_HINTS_DEDICATED) &&
 +          kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                for_each_possible_cpu(cpu) {
                        zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
                                GFP_KERNEL, cpu_to_node(cpu));
Simple merge
Simple merge
@@@ -51,7 -51,9 +51,8 @@@
  #include <asm/apic.h>
  #include <asm/irq_remapping.h>
  #include <asm/mmu_context.h>
 -#include <asm/microcode.h>
  #include <asm/nospec-branch.h>
+ #include <asm/mshyperv.h>
  
  #include "trace.h"
  #include "pmu.h"
@@@ -10952,6 -11238,21 +11249,16 @@@ static int prepare_vmcs02(struct kvm_vc
        /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
        vmx_set_efer(vcpu, vcpu->arch.efer);
  
 -      if (vmx->nested.dirty_vmcs12) {
 -              prepare_vmcs02_full(vcpu, vmcs12, from_vmentry);
 -              vmx->nested.dirty_vmcs12 = false;
 -      }
 -
+       /*
+        * Guest state is invalid and unrestricted guest is disabled,
+        * which means L1 attempted VMEntry to L2 with invalid state.
+        * Fail the VMEntry.
+        */
+       if (vmx->emulation_required) {
+               *entry_failure_code = ENTRY_FAIL_DEFAULT;
+               return 1;
+       }
        /* Shadow page tables on either EPT or shadow page tables. */
        if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
                                entry_failure_code))
diff --cc drivers/hv/hv.c
@@@ -27,9 -27,8 +27,8 @@@
  #include <linux/vmalloc.h>
  #include <linux/hyperv.h>
  #include <linux/version.h>
 -#include <linux/interrupt.h>
 +#include <linux/random.h>
  #include <linux/clockchips.h>
- #include <asm/hyperv.h>
  #include <asm/mshyperv.h>
  #include "hyperv_vmbus.h"
  
Simple merge
Simple merge
Simple merge
Simple merge