// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 */
#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
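/*
 * __sme_set() ORs the SME/SEV C-bit (sme_me_mask) into a physical address,
 * so __sme_page_pa() yields the encrypted view of a page's physical address
 * for pointers handed to hardware (e.g. the AVIC backing and table pages);
 * when memory encryption is inactive the mask is 0 and this is a plain
 * page-to-physical conversion.
 */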
#define IOPM_SIZE  (PAGE_SIZE * 3)
#define MSRPM_SIZE (PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS 20
#define MSRPM_OFFSETS          16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern bool intercept_smi;
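/*
 * MAX_DIRECT_ACCESS_MSRS bounds the direct_access_msrs[] table in svm.c and
 * sizes the per-vCPU shadow_msr_intercept bitmaps below; msrpm_offsets[]
 * caches the MSR-permission-map offsets of those MSRs so nested code only
 * merges the portions of L1's bitmap that KVM actually uses.
 */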
/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
                            pause filter count */
        VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
        VMCB_ASID,       /* ASID */
        VMCB_INTR,       /* int_ctl, int_vector */
        VMCB_NPT,        /* npt_en, nCR3, gPAT */
        VMCB_CR,         /* CR0, CR3, CR4, EFER */
        VMCB_DR,         /* DR6, DR7 */
        VMCB_DT,         /* GDT, IDT */
        VMCB_SEG,        /* CS, DS, SS, ES, CPL */
        VMCB_CR2,        /* CR2 only */
        VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
        VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
                          * AVIC PHYSICAL_TABLE pointer,
                          * AVIC LOGICAL_TABLE pointer
                          */
        VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};
#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
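/*
 * A clean bit tells hardware that the corresponding VMCB state has not been
 * modified and need not be reloaded on VMRUN, so any code that writes a VMCB
 * field must clear the matching bit.  Illustrative pattern (not a quote from
 * this header), as done for e.g. the TSC offset:
 *
 *	svm->vmcb->control.tsc_offset = offset;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 */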
struct kvm_sev_info {
        bool active;                    /* SEV enabled guest */
        bool es_active;                 /* SEV-ES enabled guest */
        unsigned int asid;              /* ASID used for this guest */
        unsigned int handle;            /* SEV firmware handle */
        int fd;                         /* SEV device fd */
        unsigned long pages_locked;     /* Number of pages locked */
        struct list_head regions_list;  /* List of registered regions */
        u64 ap_jump_table;              /* SEV-ES AP Jump Table address */
        struct kvm *enc_context_owner;  /* Owner of copied encryption context */
        struct list_head mirror_vms;    /* List of VMs mirroring */
        struct list_head mirror_entry;  /* Use as a list entry of mirrors */
        struct misc_cg *misc_cg;        /* For misc cgroup accounting */
        atomic_t migration_in_progress;
};
struct kvm_svm {
        struct kvm kvm;

        /* Struct members for AVIC */
        struct page *avic_logical_id_table_page;
        struct page *avic_physical_id_table_page;
        struct hlist_node hnode;

        struct kvm_sev_info sev_info;
};
struct kvm_vmcb_info {
        struct vmcb *ptr;
        uint64_t asid_generation;
};
struct vmcb_save_area_cached {
        u64 efer;
        u64 cr4;
        u64 cr3;
        u64 cr0;
        u64 dr7;
        u64 dr6;
};
struct vmcb_ctrl_area_cached {
        u32 intercepts[MAX_INTERCEPT];
        u16 pause_filter_thresh;
        u16 pause_filter_count;
        u32 exit_int_info_err;
};
struct svm_nested_state {
        struct kvm_vmcb_info vmcb02;

        /* These are the merged vectors */
        u32 *msrpm;

        /* A VMRUN has started but has not yet been performed, so
         * we cannot inject a nested vmexit yet.  */
        bool nested_run_pending;

        /* cache for control fields of the guest */
        struct vmcb_ctrl_area_cached ctl;

        /*
         * Note: this struct is not kept up-to-date while L2 runs; it is only
         * valid within nested_svm_vmrun.
         */
        struct vmcb_save_area_cached save;

        /*
         * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
         * changes in the MSR bitmap for L1 or switching to a different L2.
         * Note, this flag can only be used reliably in conjunction with a
         * paravirt L1 which informs L0 whether any changes to the MSR bitmap
         * for L2 were done on its side.
         */
        bool force_msr_bitmap_recalc;
};
struct vcpu_sev_es_state {
        /* SEV-ES support */
        struct vmcb_save_area *vmsa;
        struct ghcb *ghcb;
        struct kvm_host_map ghcb_map;
        bool received_first_sipi;

        /* SEV-ES scratch area support */
        void *ghcb_sa;
};
struct vcpu_svm {
        struct kvm_vcpu vcpu;
        /* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
        struct vmcb *vmcb;
        struct kvm_vmcb_info vmcb01;
        struct kvm_vmcb_info *current_vmcb;
        struct svm_cpu_data *svm_data;

        /*
         * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
         * translated into the appropriate L2_CFG bits on the host to
         * perform speculative control.
         */
        u64 virt_spec_ctrl;

        struct svm_nested_state nested;

        u64 nmi_singlestep_guest_rflags;

        unsigned int3_injected;
        unsigned long int3_rip;

        /* cached guest cpuid flags for faster access */
        bool nrips_enabled       : 1;
        bool tsc_scaling_enabled : 1;

        struct page *avic_backing_page;
        u64 *avic_physical_id_cache;

        /*
         * Per-vcpu list of struct amd_svm_iommu_ir:
         * This is used mainly to store interrupt remapping information used
         * when updating the vcpu affinity. This avoids the need to scan for
         * IRTE and try to match ga_tag in the IOMMU driver.
         */
        struct list_head ir_list;
        spinlock_t ir_list_lock;

        /* Save desired MSR intercept (read: pass-through) state */
        struct {
                DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
                DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
        } shadow_msr_intercept;
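        /*
         * shadow_msr_intercept tracks the intercept state KVM wants for each
         * directly-accessed MSR, independent of what is currently programmed
         * in the MSR permission map, so the bitmap can be rebuilt (e.g. when
         * the userspace MSR filter changes) without losing the desired
         * pass-through configuration.
         */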
        struct vcpu_sev_es_state sev_es;

        bool guest_state_loaded;
};
struct svm_cpu_data {
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
        struct vmcb *current_vmcb;

        /* index = sev_asid, value = vmcb pointer */
        struct vmcb **sev_vmcbs;
};
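/*
 * sev_vmcbs[] remembers, per SEV ASID, which VMCB last ran on this physical
 * CPU; pre_sev_run() uses it to force a TLB flush when a different VMCB
 * reuses an ASID on the same CPU.
 */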
DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);
static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_svm, kvm);
}
static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->active;
#else
        return false;
#endif
}
static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
        return false;
#endif
}
static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
        vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
        vmcb->control.clean = VMCB_ALL_CLEAN_MASK
                              & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
        vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
        return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
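/*
 * In svm.c the whole VMCB is typically marked dirty when it is about to run
 * on a different physical CPU than last time (clean bits are tracked per
 * logical CPU), and marked all-clean, minus VMCB_ALWAYS_DIRTY_MASK, right
 * after a successful VMRUN.
 */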
static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}
/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET (1 << VCPU_EXREG_PDPTR)
static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        __set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        __clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        return test_bit(bit, (unsigned long *)&control->intercepts);
}
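/*
 * vmcb_is_intercept() queries a real VMCB control area, while
 * vmcb12_is_intercept() queries the cached copy of L1's vmcb12 controls
 * (svm->nested.ctl); the nested_exit_on_*() helpers below use the latter.
 */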
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        if (!sev_es_guest(svm->vcpu.kvm)) {
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
        }

        vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
        vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

        recalc_intercepts(svm);
}
static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb->control.intercepts[INTERCEPT_DR] = 0;

        /* DR7 access must remain intercepted for an SEV-ES guest */
        if (sev_es_guest(svm->vcpu.kvm)) {
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
        }

        recalc_intercepts(svm);
}
static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        WARN_ON_ONCE(bit >= 32);
        vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

        recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        WARN_ON_ONCE(bit >= 32);
        vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

        recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb_set_intercept(&vmcb->control, bit);

        recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb_clr_intercept(&vmcb->control, bit);

        recalc_intercepts(svm);
}
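/*
 * The intercept helpers above always edit vmcb01 (L1's view); the trailing
 * recalc_intercepts() call marks the intercepts dirty and, when L2 is
 * active, re-merges vmcb01's intercepts with the cached vmcb12 controls
 * into the running vmcb02.
 */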
static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
        return vmcb_is_intercept(&svm->vmcb->control, bit);
}
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
        return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl |= V_GIF_MASK;
        else
                svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
        else
                return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
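/*
 * With hardware vGIF, the guest's GIF lives in int_ctl (V_GIF_MASK) and is
 * toggled by the CPU on STGI/CLGI; without it, KVM emulates GIF via the
 * HF_GIF_MASK software flag in vcpu->arch.hflags.
 */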
/* svm.c */
#define MSR_INVALID 0xffffffffU

extern bool dump_invalid_vmcb;
u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
                          int read, int write);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
                                     int trig_mode, int vec);
/* nested.c */

#define NESTED_EXIT_HOST     0 /* Exit handled on host level */
#define NESTED_EXIT_DONE     1 /* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE 2 /* Further checks needed      */
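/*
 * nested_svm_exit_handled() and nested_svm_exit_special() return the
 * NESTED_EXIT_* codes above to tell the caller whether a #VMEXIT must be
 * handled by L0 or reflected into L1 as a nested vmexit.
 */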
static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}
static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}
int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
                         u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
                          struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
        svm->vmcb->control.exit_code   = exit_code;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;
        return nested_svm_vmexit(svm);
}
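/*
 * nested_svm_simple_vmexit() synthesizes a nested #VMEXIT with zeroed exit
 * info; for example, the nested event-checking code uses it to reflect
 * pending INTR/NMI/SMI events into L1, as in
 * nested_svm_simple_vmexit(svm, SVM_EXIT_INTR).
 */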
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                               bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
                                       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
                                    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;
/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT              31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK             (1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK     (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK       (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK            (1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK                      0xFFFFFFFFFF000ULL
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void __avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool avic_check_apicv_inhibit_reasons(ulong bit);
void avic_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void avic_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
bool avic_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
                        uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
/* sev.c */

#define GHCB_VERSION_MAX 1ULL
#define GHCB_VERSION_MIN 1ULL

extern unsigned int max_sev_asid;
void sev_vm_destroy(struct kvm *kvm);
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
                                struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
                                  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct vmcb_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);
/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif /* __SVM_SVM_H */