/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "ops.h"
#include "vmcs.h"

extern const u32 vmx_msr_index[];
extern u64 host_efer;

extern u32 get_umwait_control_msr(void);

#define MSR_TYPE_R      1
#define MSR_TYPE_W      2
#define MSR_TYPE_RW     3

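/*
 * Map an xAPIC MMIO register offset to its x2APIC MSR index: x2APIC
 * registers live at APIC_BASE_MSR (0x800) plus the MMIO offset divided
 * by 16, e.g. the TPR at offset 0x80 becomes MSR 0x808.
 */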
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#ifdef CONFIG_X86_64
#define NR_SHARED_MSRS  7
#else
#define NR_SHARED_MSRS  4
#endif

#define NR_LOADSTORE_MSRS 8

struct vmx_msrs {
        unsigned int            nr;
        struct vmx_msr_entry    val[NR_LOADSTORE_MSRS];
};

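/*
 * An MSR whose guest value is switched lazily via the user-return MSR
 * machinery instead of on every VM entry/exit; @mask selects the bits
 * that are actually switched (see kvm_set_shared_msr()).
 */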
struct shared_msr_entry {
        unsigned index;
        u64 data;
        u64 mask;
};

enum segment_cache_field {
        SEG_FIELD_SEL = 0,
        SEG_FIELD_BASE = 1,
        SEG_FIELD_LIMIT = 2,
        SEG_FIELD_AR = 3,

        SEG_FIELD_NR = 4
};

/* Posted-Interrupt Descriptor */
struct pi_desc {
        u32 pir[8];     /* Posted interrupt requested */
        union {
                struct {
                                /* bit 256 - Outstanding Notification */
                        u16     on      : 1,
                                /* bit 257 - Suppress Notification */
                                sn      : 1,
                                /* bit 271:258 - Reserved */
                                rsvd_1  : 14;
                                /* bit 279:272 - Notification Vector */
                        u8      nv;
                                /* bit 287:280 - Reserved */
                        u8      rsvd_2;
                                /* bit 319:288 - Notification Destination */
                        u32     ndst;
                };
                u64 control;
        };
        u32 rsvd[6];
} __aligned(64);

#define RTIT_ADDR_RANGE         4

struct pt_ctx {
        u64 ctl;
        u64 status;
        u64 output_base;
        u64 output_mask;
        u64 cr3_match;
        u64 addr_a[RTIT_ADDR_RANGE];
        u64 addr_b[RTIT_ADDR_RANGE];
};

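/*
 * Per-vCPU Intel PT state: ctl_bitmask holds the RTIT_CTL bits the guest
 * may not set, addr_range is the number of address-filter ranges reported
 * by CPUID, and host/guest are the PT register contexts swapped on VM
 * entry/exit when PT is virtualized for the guest.
 */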
struct pt_desc {
        u64 ctl_bitmask;
        u32 addr_range;
        u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
        struct pt_ctx host;
        struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
        /* Has the level1 guest done vmxon? */
        bool vmxon;
        gpa_t vmxon_ptr;
        bool pml_full;

        /* The guest-physical address of the current VMCS L1 keeps for L2 */
        gpa_t current_vmptr;
        /*
         * Cache of the guest's VMCS, existing outside of guest memory.
         * Loaded from guest memory during VMPTRLD. Flushed to guest
         * memory during VMCLEAR and VMPTRLD.
         */
        struct vmcs12 *cached_vmcs12;
        /*
         * Cache of the guest's shadow VMCS, existing outside of guest
         * memory. Loaded from guest memory during VM entry. Flushed
         * to guest memory during VM exit.
         */
        struct vmcs12 *cached_shadow_vmcs12;

        /*
         * Indicates if the shadow vmcs or enlightened vmcs must be updated
         * with the data held by struct vmcs12.
         */
        bool need_vmcs12_to_shadow_sync;
        bool dirty_vmcs12;

        /*
         * Indicates lazily loaded guest state has not yet been decached from
         * vmcs02.
         */
        bool need_sync_vmcs02_to_vmcs12_rare;

        /*
         * vmcs02 has been initialized, i.e. state that is constant for
         * vmcs02 has been written to the backing VMCS.  Initialization
         * is delayed until L1 actually attempts to run a nested VM.
         */
        bool vmcs02_initialized;

        bool change_vmcs01_virtual_apic_mode;

        /*
         * Enlightened VMCS has been enabled. It does not mean that L1 has to
         * use it. However, VMX features available to L1 will be limited based
         * on what the enlightened VMCS supports.
         */
        bool enlightened_vmcs_enabled;

        /* L2 must run next, and mustn't decide to exit to L1. */
        bool nested_run_pending;

        /* Pending MTF VM-exit into L1.  */
        bool mtf_pending;

        struct loaded_vmcs vmcs02;

        /*
         * Guest pages referred to in the vmcs02 with host-physical
         * pointers, so we must keep them pinned while L2 runs.
         */
        struct page *apic_access_page;
        struct kvm_host_map virtual_apic_map;
        struct kvm_host_map pi_desc_map;

        struct kvm_host_map msr_bitmap_map;

        struct pi_desc *pi_desc;
        bool pi_pending;
        u16 posted_intr_nv;

        struct hrtimer preemption_timer;
        bool preemption_timer_expired;

        /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
        u64 vmcs01_debugctl;
        u64 vmcs01_guest_bndcfgs;

        /* to migrate it to L1 if L2 writes to L1's CR8 directly */
        int l1_tpr_threshold;

        u16 vpid02;
        u16 last_vpid;

        struct nested_vmx_msrs msrs;

        /* SMM related state */
        struct {
                /* in VMX operation on SMM entry? */
                bool vmxon;
                /* in guest mode on SMM entry? */
                bool guest_mode;
        } smm;

        gpa_t hv_evmcs_vmptr;
        struct kvm_host_map hv_evmcs_map;
        struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
        u8                    fail;
        u8                    msr_bitmap_mode;

        /*
         * If true, host state has been stored in vmx->loaded_vmcs for
         * the CPU registers that only need to be switched when transitioning
         * to/from the kernel, and the registers have been loaded with guest
         * values.  If false, host state is loaded in the CPU registers
         * and vmx->loaded_vmcs->host_state is invalid.
         */
        bool                  guest_state_loaded;

        u32                   exit_intr_info;
        u32                   idt_vectoring_info;
        ulong                 rflags;

        struct shared_msr_entry guest_msrs[NR_SHARED_MSRS];
        int                   nmsrs;
        int                   save_nmsrs;
        bool                  guest_msrs_ready;
#ifdef CONFIG_X86_64
        u64                   msr_host_kernel_gs_base;
        u64                   msr_guest_kernel_gs_base;
#endif

        u64                   spec_ctrl;
        u32                   msr_ia32_umwait_control;

        u32 secondary_exec_control;

        /*
         * loaded_vmcs points to the VMCS currently used in this vcpu. For a
         * non-nested (L1) guest, it always points to vmcs01. For a nested
         * guest (L2), it points to a different VMCS.
         */
        struct loaded_vmcs    vmcs01;
        struct loaded_vmcs   *loaded_vmcs;

        struct msr_autoload {
                struct vmx_msrs guest;
                struct vmx_msrs host;
        } msr_autoload;

        struct msr_autostore {
                struct vmx_msrs guest;
        } msr_autostore;

        struct {
                int vm86_active;
                ulong save_rflags;
                struct kvm_segment segs[8];
        } rmode;
        struct {
                u32 bitmask; /* 4 bits per segment (1 bit per field) */
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } seg[8];
        } segment_cache;
        int vpid;
        bool emulation_required;

        u32 exit_reason;

        /* Posted interrupt descriptor */
        struct pi_desc pi_desc;

        /* Support for a guest hypervisor (nested VMX) */
        struct nested_vmx nested;

        /* Dynamic PLE window. */
        unsigned int ple_window;
        bool ple_window_dirty;

        bool req_immediate_exit;

        /* Support for PML */
#define PML_ENTITY_NUM          512
        struct page *pml_pg;

        /* apic deadline value in host tsc */
        u64 hv_deadline_tsc;

        u64 current_tsc_ratio;

        u32 host_pkru;

        unsigned long host_debugctlmsr;

        /*
         * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
         * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
         * in msr_ia32_feature_control_valid_bits.
         */
        u64 msr_ia32_feature_control;
        u64 msr_ia32_feature_control_valid_bits;
        u64 ept_pointer;

        struct pt_desc pt_desc;
};

enum ept_pointers_status {
        EPT_POINTERS_CHECK = 0,
        EPT_POINTERS_MATCH = 1,
        EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
        struct kvm kvm;

        unsigned int tss_addr;
        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;

        enum ept_pointers_status ept_pointers_match;
        spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
                        unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);

#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1
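/*
 * Posting protocol (illustrative sketch using the pi_* accessors below,
 * not an interface defined here): a sender records the vector in PIR and
 * raises ON, sending the notification IPI only on the 0->1 transition:
 *
 *	if (pi_test_and_set_pir(vector, pi_desc))
 *		return;
 *	if (pi_test_and_set_on(pi_desc))
 *		return;
 *	... send notification IPI to the CPU identified by pi_desc->ndst ...
 */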

static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
        return test_and_set_bit(POSTED_INTR_ON,
                        (unsigned long *)&pi_desc->control);
}

static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
        return test_and_clear_bit(POSTED_INTR_ON,
                        (unsigned long *)&pi_desc->control);
}

static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
        return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
{
        return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
        set_bit(POSTED_INTR_SN,
                (unsigned long *)&pi_desc->control);
}

static inline void pi_set_on(struct pi_desc *pi_desc)
{
        set_bit(POSTED_INTR_ON,
                (unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
        clear_bit(POSTED_INTR_ON,
                (unsigned long *)&pi_desc->control);
}

static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
        clear_bit(POSTED_INTR_SN,
                (unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
        return test_bit(POSTED_INTR_ON,
                        (unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
        return test_bit(POSTED_INTR_SN,
                        (unsigned long *)&pi_desc->control);
}

static inline u8 vmx_get_rvi(void)
{
        return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname)                                 \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)      \
{                                                                           \
        if (vmx->loaded_vmcs->controls_shadow.lname != val) {               \
                vmcs_write32(uname, val);                                   \
                vmx->loaded_vmcs->controls_shadow.lname = val;              \
        }                                                                   \
}                                                                           \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)                \
{                                                                           \
        return vmx->loaded_vmcs->controls_shadow.lname;                     \
}                                                                           \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)   \
{                                                                           \
        lname##_controls_set(vmx, lname##_controls_get(vmx) | val);         \
}                                                                           \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{                                                                           \
        lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);        \
}
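/*
 * Each instantiation below generates {set,get,setbit,clearbit} accessors
 * for one VMCS controls field, e.g. exec_controls_set(); the cached shadow
 * value avoids a VMWRITE when the control word is unchanged. Illustrative
 * use:
 *
 *	exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
 *	if (pin_controls_get(vmx) & PIN_BASED_POSTED_INTR)
 *		...
 */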
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
        vmx->segment_cache.bitmask = 0;
}

static inline u32 vmx_vmentry_ctrl(void)
{
        u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
        if (pt_mode == PT_MODE_SYSTEM)
                vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
                                  VM_ENTRY_LOAD_IA32_RTIT_CTL);
        /* Loading of EFER and PERF_GLOBAL_CTRL is toggled dynamically */
        return vmentry_ctrl &
                ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
        u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
        if (pt_mode == PT_MODE_SYSTEM)
                vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
                                 VM_EXIT_CLEAR_IA32_RTIT_CTL);
        /* Loading of EFER and PERF_GLOBAL_CTRL is toggled dynamically */
        return vmexit_ctrl &
                ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);
u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
        return &(to_vmx(vcpu)->pi_desc);
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
        return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
                              GFP_KERNEL_ACCOUNT);
}
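/*
 * With EPT enabled, a guest-physical invalidation (or the absence of VPID)
 * requires an EPT context sync keyed to the current root; otherwise it is
 * sufficient to invalidate the linear mappings tagged with this VPID.
 */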
static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
                                bool invalidate_gpa)
{
        if (enable_ept && (invalidate_gpa || !enable_vpid)) {
                if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
                        return;
                ept_sync_context(construct_eptp(vcpu,
                                                vcpu->arch.mmu->root_hpa));
        } else {
                vpid_sync_context(vpid);
        }
}

static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
        __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
}

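/* Propagate the vCPU's current TSC scaling ratio into the active VMCS. */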
static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
        vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
        vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
        return vmx->secondary_exec_control &
                SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

void dump_vmcs(void);

#endif /* __KVM_X86_VMX_H */